Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next into for-davem

John W. Linville 2014-03-21 14:02:04 -04:00
commit 49c0ca17ee
215 changed files with 13584 additions and 2819 deletions


@ -0,0 +1,34 @@
* Texas Instruments TRF7970A RFID/NFC/15693 Transceiver
Required properties:
- compatible: Should be "ti,trf7970a".
- spi-max-frequency: Maximum SPI frequency (<= 2000000).
- interrupt-parent: phandle of parent interrupt handler.
- interrupts: A single interrupt specifier.
- ti,enable-gpios: Two GPIO entries used for 'EN' and 'EN2' pins on the
TRF7970A.
- vin-supply: Regulator for supply voltage to VIN pin.
Optional SoC Specific Properties:
- pinctrl-names: Contains only one value - "default".
- pinctrl-0: Specifies the pin control groups used for this controller.
Example (for ARM-based BeagleBone with TRF7970A on SPI1):
&spi1 {
status = "okay";
nfc@0 {
compatible = "ti,trf7970a";
reg = <0>;
pinctrl-names = "default";
pinctrl-0 = <&trf7970a_default>;
spi-max-frequency = <2000000>;
interrupt-parent = <&gpio2>;
interrupts = <14 0>;
ti,enable-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>,
<&gpio2 5 GPIO_ACTIVE_LOW>;
vin-supply = <&ldo3_reg>;
status = "okay";
};
};
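Note: the GPIO_ACTIVE_LOW constant used in the example resolves only if the board DTS pulls in the standard GPIO binding header, typically:

#include <dt-bindings/gpio/gpio.h>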


@ -6117,6 +6117,7 @@ F: include/net/nfc/
F: include/uapi/linux/nfc.h
F: drivers/nfc/
F: include/linux/platform_data/pn544.h
F: Documentation/devicetree/bindings/net/nfc/
NFS, SUNRPC, AND LOCKD CLIENTS
M: Trond Myklebust <trond.myklebust@primarydata.com>


@ -89,6 +89,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0b05, 0x17d0) },
{ USB_DEVICE(0x0CF3, 0x0036) },
{ USB_DEVICE(0x0CF3, 0x3004) },
{ USB_DEVICE(0x0CF3, 0x3005) },
{ USB_DEVICE(0x0CF3, 0x3008) },
{ USB_DEVICE(0x0CF3, 0x311D) },
{ USB_DEVICE(0x0CF3, 0x311E) },
@ -137,6 +138,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
@ -180,10 +182,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
}
memcpy(send_buf, firmware->data, 20);
if ((err = usb_control_msg(udev, pipe,
USB_REQ_DFU_DNLOAD,
USB_TYPE_VENDOR, 0, 0,
send_buf, 20, USB_CTRL_SET_TIMEOUT)) < 0) {
err = usb_control_msg(udev, pipe, USB_REQ_DFU_DNLOAD, USB_TYPE_VENDOR,
0, 0, send_buf, 20, USB_CTRL_SET_TIMEOUT);
if (err < 0) {
BT_ERR("Can't change to loading configuration err");
goto error;
}
@ -366,7 +367,7 @@ static int ath3k_load_patch(struct usb_device *udev)
}
snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu",
fw_version.rom_version);
le32_to_cpu(fw_version.rom_version));
ret = request_firmware(&firmware, filename, &udev->dev);
if (ret < 0) {
@ -428,7 +429,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
}
snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s",
fw_version.rom_version, clk_value, ".dfu");
le32_to_cpu(fw_version.rom_version), clk_value, ".dfu");
ret = request_firmware(&firmware, filename, &udev->dev);
if (ret < 0) {

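A note on the le32_to_cpu() additions above: the version block is read straight out of the device, so rom_version sits in memory as little-endian, and formatting it raw would request a byte-swapped firmware filename on big-endian hosts. A minimal sketch of the corrected pattern; the struct here is a simplified stand-in, not the driver's actual definition:

#include <linux/kernel.h>
#include <linux/types.h>

struct fw_version_demo {
	__le32 rom_version;	/* stored little-endian, as sent by the device */
};

static void build_fw_name(char *buf, size_t len,
			  const struct fw_version_demo *ver)
{
	/* le32_to_cpu() is a no-op on LE hosts and a byte swap on BE hosts */
	snprintf(buf, len, "ar3k/AthrBT_0x%08x.dfu",
		 le32_to_cpu(ver->rom_version));
}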

@ -131,8 +131,11 @@ static int bfusb_send_bulk(struct bfusb_data *data, struct sk_buff *skb)
BT_DBG("bfusb %p skb %p len %d", data, skb, skb->len);
if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC)))
return -ENOMEM;
if (!urb) {
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb)
return -ENOMEM;
}
pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep);
@ -218,8 +221,11 @@ static int bfusb_rx_submit(struct bfusb_data *data, struct urb *urb)
BT_DBG("bfusb %p urb %p", data, urb);
if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC)))
return -ENOMEM;
if (!urb) {
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb)
return -ENOMEM;
}
skb = bt_skb_alloc(size, GFP_ATOMIC);
if (!skb) {

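The bfusb hunks above are the first instance of a cleanup repeated through the bluecard, bt3c, btuart, dtl1, hci_bcsp, hci_h5 and hci_ldisc hunks below: kernel style discourages assignments buried inside an if condition, so if (!(x = f())) is split into a visible assignment plus a separate test. Distilled into a self-contained sketch (the queue name is hypothetical):

#include <linux/skbuff.h>

static void drain_txq(struct sk_buff_head *txq)
{
	struct sk_buff *skb;

	for (;;) {
		/* was: if (!(skb = skb_dequeue(txq))) break; */
		skb = skb_dequeue(txq);
		if (!skb)
			break;

		kfree_skb(skb);	/* stand-in for the real "send frame" work */
	}
}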

@ -257,7 +257,8 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
ready_bit = XMIT_BUF_ONE_READY;
}
if (!(skb = skb_dequeue(&(info->txq))))
skb = skb_dequeue(&(info->txq));
if (!skb)
break;
if (bt_cb(skb)->pkt_type & 0x80) {
@ -391,7 +392,8 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
if (info->rx_skb == NULL) {
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
if (!info->rx_skb) {
BT_ERR("Can't allocate mem for new packet");
return;
}
@ -566,7 +568,8 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
/* Ericsson baud rate command */
unsigned char cmd[] = { HCI_COMMAND_PKT, 0x09, 0xfc, 0x01, 0x03 };
if (!(skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
if (!skb) {
BT_ERR("Can't allocate mem for new packet");
return -1;
}


@ -193,8 +193,8 @@ static void bt3c_write_wakeup(bt3c_info_t *info)
if (!pcmcia_dev_present(info->p_dev))
break;
if (!(skb = skb_dequeue(&(info->txq)))) {
skb = skb_dequeue(&(info->txq));
if (!skb) {
clear_bit(XMIT_SENDING, &(info->tx_state));
break;
}
@ -238,7 +238,8 @@ static void bt3c_receive(bt3c_info_t *info)
if (info->rx_skb == NULL) {
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
if (!info->rx_skb) {
BT_ERR("Can't allocate mem for new packet");
return;
}


@ -149,7 +149,8 @@ static void btuart_write_wakeup(btuart_info_t *info)
if (!pcmcia_dev_present(info->p_dev))
return;
if (!(skb = skb_dequeue(&(info->txq))))
skb = skb_dequeue(&(info->txq));
if (!skb)
break;
/* Send frame */
@ -190,7 +191,8 @@ static void btuart_receive(btuart_info_t *info)
if (info->rx_skb == NULL) {
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
if (!info->rx_skb) {
BT_ERR("Can't allocate mem for new packet");
return;
}


@ -159,6 +159,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },


@ -153,7 +153,8 @@ static void dtl1_write_wakeup(dtl1_info_t *info)
if (!pcmcia_dev_present(info->p_dev))
return;
if (!(skb = skb_dequeue(&(info->txq))))
skb = skb_dequeue(&(info->txq));
if (!skb)
break;
/* Send frame */
@ -215,13 +216,15 @@ static void dtl1_receive(dtl1_info_t *info)
info->hdev->stat.byte_rx++;
/* Allocate packet */
if (info->rx_skb == NULL)
if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
if (info->rx_skb == NULL) {
info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
if (!info->rx_skb) {
BT_ERR("Can't allocate mem for new packet");
info->rx_state = RECV_WAIT_NSH;
info->rx_count = NSHL;
return;
}
}
*skb_put(info->rx_skb, 1) = inb(iobase + UART_RX);
nsh = (nsh_t *)info->rx_skb->data;


@ -291,7 +291,8 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
/* First of all, check for unreliable messages in the queue,
since they have priority */
if ((skb = skb_dequeue(&bcsp->unrel)) != NULL) {
skb = skb_dequeue(&bcsp->unrel);
if (skb != NULL) {
struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
if (nskb) {
kfree_skb(skb);
@ -308,16 +309,20 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING);
if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) {
struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
if (nskb) {
__skb_queue_tail(&bcsp->unack, skb);
mod_timer(&bcsp->tbcsp, jiffies + HZ / 4);
spin_unlock_irqrestore(&bcsp->unack.lock, flags);
return nskb;
} else {
skb_queue_head(&bcsp->rel, skb);
BT_ERR("Could not dequeue pkt because alloc_skb failed");
if (bcsp->unack.qlen < BCSP_TXWINSIZE) {
skb = skb_dequeue(&bcsp->rel);
if (skb != NULL) {
struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len,
bt_cb(skb)->pkt_type);
if (nskb) {
__skb_queue_tail(&bcsp->unack, skb);
mod_timer(&bcsp->tbcsp, jiffies + HZ / 4);
spin_unlock_irqrestore(&bcsp->unack.lock, flags);
return nskb;
} else {
skb_queue_head(&bcsp->rel, skb);
BT_ERR("Could not dequeue pkt because alloc_skb failed");
}
}
}
@ -715,6 +720,9 @@ static int bcsp_open(struct hci_uart *hu)
static int bcsp_close(struct hci_uart *hu)
{
struct bcsp_struct *bcsp = hu->priv;
del_timer_sync(&bcsp->tbcsp);
hu->priv = NULL;
BT_DBG("hu %p", hu);
@ -722,7 +730,6 @@ static int bcsp_close(struct hci_uart *hu)
skb_queue_purge(&bcsp->unack);
skb_queue_purge(&bcsp->rel);
skb_queue_purge(&bcsp->unrel);
del_timer(&bcsp->tbcsp);
kfree(bcsp);
return 0;


@ -206,12 +206,12 @@ static int h5_close(struct hci_uart *hu)
{
struct h5 *h5 = hu->priv;
del_timer_sync(&h5->timer);
skb_queue_purge(&h5->unack);
skb_queue_purge(&h5->rel);
skb_queue_purge(&h5->unrel);
del_timer(&h5->timer);
kfree(h5);
return 0;
@ -673,7 +673,8 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
}
if ((skb = skb_dequeue(&h5->unrel)) != NULL) {
skb = skb_dequeue(&h5->unrel);
if (skb != NULL) {
nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
skb->data, skb->len);
if (nskb) {
@ -690,7 +691,8 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
if (h5->unack.qlen >= h5->tx_win)
goto unlock;
if ((skb = skb_dequeue(&h5->rel)) != NULL) {
skb = skb_dequeue(&h5->rel);
if (skb != NULL) {
nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
skb->data, skb->len);
if (nskb) {

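Both bcsp_close() and h5_close() above move timer teardown to the top of the close path and switch del_timer() to del_timer_sync(). del_timer() can return while the handler is still executing on another CPU, so purging the queues and freeing the state first would leave a live retransmission timer touching freed memory. A minimal sketch of the safe ordering, with a hypothetical state type:

#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

struct proto_state {
	struct timer_list retx_timer;
	struct sk_buff_head unack;
};

static void proto_close(struct proto_state *st)
{
	/* wait for any running handler to finish before tearing down */
	del_timer_sync(&st->retx_timer);

	/* only now is it safe to free what the handler could touch */
	skb_queue_purge(&st->unack);
	kfree(st);
}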

@ -271,7 +271,8 @@ static int hci_uart_tty_open(struct tty_struct *tty)
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
if (!(hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL))) {
hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL);
if (!hu) {
BT_ERR("Can't allocate control structure");
return -ENFILE;
}
@ -569,7 +570,8 @@ static int __init hci_uart_init(void)
hci_uart_ldisc.write_wakeup = hci_uart_tty_wakeup;
hci_uart_ldisc.owner = THIS_MODULE;
if ((err = tty_register_ldisc(N_HCI, &hci_uart_ldisc))) {
err = tty_register_ldisc(N_HCI, &hci_uart_ldisc);
if (err) {
BT_ERR("HCI line discipline registration failed. (%d)", err);
return err;
}
@ -614,7 +616,8 @@ static void __exit hci_uart_exit(void)
#endif
/* Release tty registration of line discipline */
if ((err = tty_unregister_ldisc(N_HCI)))
err = tty_unregister_ldisc(N_HCI);
if (err)
BT_ERR("Can't unregister HCI line discipline (%d)", err);
}


@ -116,7 +116,7 @@ config AT76C50X_USB
config AIRO_CS
tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
depends on PCMCIA && (BROKEN || !M32R)
depends on CFG80211 && PCMCIA && (BROKEN || !M32R)
select WIRELESS_EXT
select WEXT_SPY
select WEXT_PRIV
@ -281,5 +281,6 @@ source "drivers/net/wireless/ti/Kconfig"
source "drivers/net/wireless/zd1211rw/Kconfig"
source "drivers/net/wireless/mwifiex/Kconfig"
source "drivers/net/wireless/cw1200/Kconfig"
source "drivers/net/wireless/rsi/Kconfig"
endif # WLAN


@ -59,3 +59,4 @@ obj-$(CONFIG_BRCMFMAC) += brcm80211/
obj-$(CONFIG_BRCMSMAC) += brcm80211/
obj-$(CONFIG_CW1200) += cw1200/
obj-$(CONFIG_RSI_91X) += rsi/


@ -56,6 +56,15 @@ enum ath_device_state {
ATH_HW_INITIALIZED,
};
enum ath_op_flags {
ATH_OP_INVALID,
ATH_OP_BEACONS,
ATH_OP_ANI_RUN,
ATH_OP_PRIM_STA_VIF,
ATH_OP_HW_RESET,
ATH_OP_SCANNING,
};
enum ath_bus_type {
ATH_PCI,
ATH_AHB,
@ -130,6 +139,7 @@ struct ath_common {
struct ieee80211_hw *hw;
int debug_mask;
enum ath_device_state state;
unsigned long op_flags;
struct ath_ani ani;


@ -266,12 +266,12 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
* ath10k_ce_sendlist_send.
* The caller takes responsibility for any needed locking.
*/
static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags)
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
@ -1067,9 +1067,9 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
*
* For the lack of a better place do the check here.
*/
BUILD_BUG_ON(TARGET_NUM_MSDU_DESC >
BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
BUILD_BUG_ON(TARGET_10X_NUM_MSDU_DESC >
BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
ret = ath10k_pci_wake(ar);


@ -23,7 +23,7 @@
/* Maximum number of Copy Engine's supported */
#define CE_COUNT_MAX 8
#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048
#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
@ -152,6 +152,13 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
unsigned int transfer_id,
unsigned int flags);
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags);
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts);

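CE_HTT_H2T_MSG_SRC_NENTRIES doubles to 4096 and the BUILD_BUG_ON() bounds in ce.c gain a factor of two because the new scatter-gather TX path can post two source-ring entries per MSDU (descriptor plus payload, see the sg_items[2] array in htt_tx.c later in this merge). BUILD_BUG_ON() turns that sizing rule into a compile-time failure; the shape of the check, with an illustrative descriptor count rather than the driver's real one:

#include <linux/bug.h>

#define SRC_NENTRIES	4096
#define NUM_MSDU_DESC	1424	/* illustrative value only */

static inline void ring_size_check(void)
{
	/* two entries per MSDU must fit; the -1 keeps one slot free,
	 * the usual full-vs-empty ring convention */
	BUILD_BUG_ON(2 * NUM_MSDU_DESC > (SRC_NENTRIES - 1));
}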

@ -62,16 +62,13 @@ struct ath10k;
struct ath10k_skb_cb {
dma_addr_t paddr;
bool is_mapped;
bool is_aborted;
u8 vdev_id;
struct {
u8 tid;
bool is_offchan;
u8 frag_len;
u8 pad_len;
struct ath10k_htt_txbuf *txbuf;
u32 txbuf_paddr;
} __packed htt;
struct {
@ -87,32 +84,6 @@ static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
}
static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb)
{
if (ATH10K_SKB_CB(skb)->is_mapped)
return -EINVAL;
ATH10K_SKB_CB(skb)->paddr = dma_map_single(dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, ATH10K_SKB_CB(skb)->paddr)))
return -EIO;
ATH10K_SKB_CB(skb)->is_mapped = true;
return 0;
}
static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb)
{
if (!ATH10K_SKB_CB(skb)->is_mapped)
return -EINVAL;
dma_unmap_single(dev, ATH10K_SKB_CB(skb)->paddr, skb->len,
DMA_TO_DEVICE);
ATH10K_SKB_CB(skb)->is_mapped = false;
return 0;
}
static inline u32 host_interest_item_address(u32 item_offset)
{
return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
@ -288,6 +259,7 @@ struct ath10k_vif {
u8 fixed_rate;
u8 fixed_nss;
u8 force_sgi;
};
struct ath10k_vif_iter {


@ -21,6 +21,14 @@
#include <linux/kernel.h>
#include "core.h"
struct ath10k_hif_sg_item {
u16 transfer_id;
void *transfer_context; /* NULL = tx completion callback not called */
void *vaddr; /* for debugging mostly */
u32 paddr;
u16 len;
};
struct ath10k_hif_cb {
int (*tx_completion)(struct ath10k *ar,
struct sk_buff *wbuf,
@ -31,11 +39,9 @@ struct ath10k_hif_cb {
};
struct ath10k_hif_ops {
/* Send the head of a buffer to HIF for transmission to the target. */
int (*send_head)(struct ath10k *ar, u8 pipe_id,
unsigned int transfer_id,
unsigned int nbytes,
struct sk_buff *buf);
/* send a scatter-gather list to the target */
int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
struct ath10k_hif_sg_item *items, int n_items);
/*
* API to handle HIF-specific BMI message exchanges, this API is
@ -86,12 +92,11 @@ struct ath10k_hif_ops {
};
static inline int ath10k_hif_send_head(struct ath10k *ar, u8 pipe_id,
unsigned int transfer_id,
unsigned int nbytes,
struct sk_buff *buf)
static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
struct ath10k_hif_sg_item *items,
int n_items)
{
return ar->hif.ops->send_head(ar, pipe_id, transfer_id, nbytes, buf);
return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
}
static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,


@ -63,7 +63,9 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
struct sk_buff *skb)
{
ath10k_skb_unmap(htc->ar->dev, skb);
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}
@ -122,6 +124,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
struct sk_buff *skb)
{
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct ath10k_hif_sg_item sg_item;
struct device *dev = htc->ar->dev;
int credits = 0;
int ret;
@ -157,19 +162,25 @@ int ath10k_htc_send(struct ath10k_htc *htc,
ath10k_htc_prepare_tx_skb(ep, skb);
ret = ath10k_skb_map(htc->ar->dev, skb);
skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
ret = dma_mapping_error(dev, skb_cb->paddr);
if (ret)
goto err_credits;
ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid,
skb->len, skb);
sg_item.transfer_id = ep->eid;
sg_item.transfer_context = skb;
sg_item.vaddr = skb->data;
sg_item.paddr = skb_cb->paddr;
sg_item.len = skb->len;
ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
if (ret)
goto err_unmap;
return 0;
err_unmap:
ath10k_skb_unmap(htc->ar->dev, skb);
dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
@ -191,10 +202,8 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
if (!skb) {
ath10k_warn("invalid sk_buff completion - NULL pointer. firmware crashed?\n");
if (WARN_ON_ONCE(!skb))
return 0;
}
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */


@ -20,6 +20,7 @@
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include "htc.h"
#include "rx_desc.h"
@ -1181,11 +1182,20 @@ struct htt_rx_info {
u32 info1;
u32 info2;
} rate;
u32 tsf;
bool fcs_err;
bool amsdu_more;
bool mic_err;
};
struct ath10k_htt_txbuf {
struct htt_data_tx_desc_frag frags[2];
struct ath10k_htc_hdr htc_hdr;
struct htt_cmd_hdr cmd_hdr;
struct htt_data_tx_desc cmd_tx;
} __packed;
struct ath10k_htt {
struct ath10k *ar;
enum ath10k_htc_ep_id eid;
@ -1267,11 +1277,18 @@ struct ath10k_htt {
struct sk_buff **pending_tx;
unsigned long *used_msdu_ids; /* bitmap */
wait_queue_head_t empty_tx_wq;
struct dma_pool *tx_pool;
/* set if host-fw communication goes haywire
* used to avoid further failures */
bool rx_confused;
struct tasklet_struct rx_replenish_task;
/* This is used to group tx/rx completions separately and process them
* in batches to reduce cache stalls */
struct tasklet_struct txrx_compl_task;
struct sk_buff_head tx_compl_q;
struct sk_buff_head rx_compl_q;
};
#define RX_HTT_HDR_STATUS_LEN 64
@ -1343,4 +1360,5 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
#endif


@ -43,7 +43,7 @@
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);
static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
@ -225,18 +225,16 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
ath10k_htt_rx_msdu_buff_replenish(htt);
}
static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
{
return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
}
void ath10k_htt_rx_detach(struct ath10k_htt *htt)
{
int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
del_timer_sync(&htt->rx_ring.refill_retry_timer);
tasklet_kill(&htt->rx_replenish_task);
tasklet_kill(&htt->txrx_compl_task);
skb_queue_purge(&htt->tx_compl_q);
skb_queue_purge(&htt->rx_compl_q);
while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
struct sk_buff *skb =
@ -270,10 +268,12 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
int idx;
struct sk_buff *msdu;
spin_lock_bh(&htt->rx_ring.lock);
lockdep_assert_held(&htt->rx_ring.lock);
if (ath10k_htt_rx_ring_elems(htt) == 0)
ath10k_warn("htt rx ring is empty!\n");
if (htt->rx_ring.fill_cnt == 0) {
ath10k_warn("tried to pop sk_buff from an empty rx ring\n");
return NULL;
}
idx = htt->rx_ring.sw_rd_idx.msdu_payld;
msdu = htt->rx_ring.netbufs_ring[idx];
@ -283,7 +283,6 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
htt->rx_ring.sw_rd_idx.msdu_payld = idx;
htt->rx_ring.fill_cnt--;
spin_unlock_bh(&htt->rx_ring.lock);
return msdu;
}
@ -307,8 +306,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
if (ath10k_htt_rx_ring_elems(htt) == 0)
ath10k_warn("htt rx ring is empty!\n");
lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_confused) {
ath10k_warn("htt is confused. refusing rx\n");
@ -400,6 +398,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
RX_MSDU_START_INFO0_MSDU_LENGTH);
msdu_chained = rx_desc->frag_info.ring2_more_count;
msdu_chaining = msdu_chained;
if (msdu_len_invalid)
msdu_len = 0;
@ -427,7 +426,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu->next = next;
msdu = next;
msdu_chaining = 1;
}
last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
@ -529,6 +527,12 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
(unsigned long)htt);
skb_queue_head_init(&htt->tx_compl_q);
skb_queue_head_init(&htt->rx_compl_q);
tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
(unsigned long)htt);
ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
htt->rx_ring.size, htt->rx_ring.fill_level);
return 0;
@ -632,6 +636,12 @@ struct amsdu_subframe_hdr {
__be16 len;
} __packed;
static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{
/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
}
static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
struct htt_rx_info *info)
{
@ -681,7 +691,7 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
case RX_MSDU_DECAP_NATIVE_WIFI:
/* pull decapped header and copy DA */
hdr = (struct ieee80211_hdr *)skb->data;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
skb_pull(skb, hdr_len);
@ -768,7 +778,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
case RX_MSDU_DECAP_NATIVE_WIFI:
/* Pull decapped header */
hdr = (struct ieee80211_hdr *)skb->data;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
skb_pull(skb, hdr_len);
/* Push original header */
@ -846,6 +856,20 @@ static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
return false;
}
static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
u32 flags;
rxd = (void *)skb->data - sizeof(*rxd);
flags = __le32_to_cpu(rxd->attention.flags);
if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
return true;
return false;
}
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
@ -877,6 +901,57 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
return CHECKSUM_UNNECESSARY;
}
static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
{
struct sk_buff *next = msdu_head->next;
struct sk_buff *to_free = next;
int space;
int total_len = 0;
/* TODO: Might could optimize this by using
* skb_try_coalesce or similar method to
* decrease copying, or maybe get mac80211 to
* provide a way to just receive a list of
* skb?
*/
msdu_head->next = NULL;
/* Allocate total length all at once. */
while (next) {
total_len += next->len;
next = next->next;
}
space = total_len - skb_tailroom(msdu_head);
if ((space > 0) &&
(pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
/* TODO: bump some rx-oom error stat */
/* put it back together so we can free the
* whole list at once.
*/
msdu_head->next = to_free;
return -1;
}
/* Walk list again, copying contents into
* msdu_head
*/
next = to_free;
while (next) {
skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
next->len);
next = next->next;
}
/* If here, we have consolidated skb. Free the
* fragments and pass the main skb on up the
* stack.
*/
ath10k_htt_rx_free_msdu_chain(to_free);
return 0;
}
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
@ -888,6 +963,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
u8 *fw_desc;
int i, j;
lockdep_assert_held(&htt->rx_ring.lock);
memset(&info, 0, sizeof(info));
fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
@ -940,7 +1017,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
status = info.status;
/* Skip mgmt frames while we handle this in WMI */
if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
ath10k_htt_rx_is_mgmt(msdu_head)) {
ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
@ -964,10 +1042,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
continue;
}
/* FIXME: we do not support chaining yet.
* this needs investigation */
if (msdu_chaining) {
ath10k_warn("htt rx msdu_chaining is true\n");
if (msdu_chaining &&
(ath10k_unchain_msdu(msdu_head) < 0)) {
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
@ -990,6 +1066,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
info.rate.info0 = rx->ppdu.info0;
info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
info.tsf = __le32_to_cpu(rx->ppdu.tsf);
hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
@ -1023,8 +1100,11 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
msdu_head = NULL;
msdu_tail = NULL;
spin_lock_bh(&htt->rx_ring.lock);
msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
&msdu_head, &msdu_tail);
spin_unlock_bh(&htt->rx_ring.lock);
ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
@ -1116,6 +1196,45 @@ end:
}
}
static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (struct htt_resp *)skb->data;
struct htt_tx_done tx_done = {};
int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
__le16 msdu_id;
int i;
lockdep_assert_held(&htt->tx_lock);
switch (status) {
case HTT_DATA_TX_STATUS_NO_ACK:
tx_done.no_ack = true;
break;
case HTT_DATA_TX_STATUS_OK:
break;
case HTT_DATA_TX_STATUS_DISCARD:
case HTT_DATA_TX_STATUS_POSTPONE:
case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
tx_done.discard = true;
break;
default:
ath10k_warn("unhandled tx completion status %d\n", status);
tx_done.discard = true;
break;
}
ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
resp->data_tx_completion.num_msdus);
for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
msdu_id = resp->data_tx_completion.msdus[i];
tx_done.msdu_id = __le16_to_cpu(msdu_id);
ath10k_txrx_tx_unref(htt, &tx_done);
}
}
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
@ -1134,10 +1253,12 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
complete(&htt->target_version_received);
break;
}
case HTT_T2H_MSG_TYPE_RX_IND: {
ath10k_htt_rx_handler(htt, &resp->rx_ind);
break;
}
case HTT_T2H_MSG_TYPE_RX_IND:
spin_lock_bh(&htt->rx_ring.lock);
__skb_queue_tail(&htt->rx_compl_q, skb);
spin_unlock_bh(&htt->rx_ring.lock);
tasklet_schedule(&htt->txrx_compl_task);
return;
case HTT_T2H_MSG_TYPE_PEER_MAP: {
struct htt_peer_map_event ev = {
.vdev_id = resp->peer_map.vdev_id,
@ -1172,44 +1293,17 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
spin_lock_bh(&htt->tx_lock);
ath10k_txrx_tx_unref(htt, &tx_done);
spin_unlock_bh(&htt->tx_lock);
break;
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
struct htt_tx_done tx_done = {};
int status = MS(resp->data_tx_completion.flags,
HTT_DATA_TX_STATUS);
__le16 msdu_id;
int i;
switch (status) {
case HTT_DATA_TX_STATUS_NO_ACK:
tx_done.no_ack = true;
break;
case HTT_DATA_TX_STATUS_OK:
break;
case HTT_DATA_TX_STATUS_DISCARD:
case HTT_DATA_TX_STATUS_POSTPONE:
case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
tx_done.discard = true;
break;
default:
ath10k_warn("unhandled tx completion status %d\n",
status);
tx_done.discard = true;
break;
}
ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
resp->data_tx_completion.num_msdus);
for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
msdu_id = resp->data_tx_completion.msdus[i];
tx_done.msdu_id = __le16_to_cpu(msdu_id);
ath10k_txrx_tx_unref(htt, &tx_done);
}
break;
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
spin_lock_bh(&htt->tx_lock);
__skb_queue_tail(&htt->tx_compl_q, skb);
spin_unlock_bh(&htt->tx_lock);
tasklet_schedule(&htt->txrx_compl_task);
return;
case HTT_T2H_MSG_TYPE_SEC_IND: {
struct ath10k *ar = htt->ar;
struct htt_security_indication *ev = &resp->security_indication;
@ -1249,3 +1343,25 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
/* Free the indication buffer */
dev_kfree_skb_any(skb);
}
static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
struct htt_resp *resp;
struct sk_buff *skb;
spin_lock_bh(&htt->tx_lock);
while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
dev_kfree_skb_any(skb);
}
spin_unlock_bh(&htt->tx_lock);
spin_lock_bh(&htt->rx_ring.lock);
while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
resp = (struct htt_resp *)skb->data;
ath10k_htt_rx_handler(htt, &resp->rx_ind);
dev_kfree_skb_any(skb);
}
spin_unlock_bh(&htt->rx_ring.lock);
}

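The new ath10k_htt_txrx_compl_task() above batches completion processing: the message handler now only queues the HTT response and schedules a tasklet, and the tasklet drains whole queues under a single lock acquisition, which is what the "reduce cache stalls" comment in htt.h refers to. The skeleton of that pattern with hypothetical names, independent of the ath10k structures:

#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct compl_ctx {
	struct sk_buff_head q;
	spinlock_t lock;
	struct tasklet_struct task;
};

/* hot path: stash the message, defer the real work */
static void compl_enqueue(struct compl_ctx *c, struct sk_buff *skb)
{
	spin_lock_bh(&c->lock);
	__skb_queue_tail(&c->q, skb);
	spin_unlock_bh(&c->lock);
	tasklet_schedule(&c->task);
}

/* tasklet: drain everything queued since the last run */
static void compl_task(unsigned long ptr)
{
	struct compl_ctx *c = (struct compl_ctx *)ptr;
	struct sk_buff *skb;

	spin_lock_bh(&c->lock);
	while ((skb = __skb_dequeue(&c->q)))
		dev_kfree_skb_any(skb);	/* process, then free */
	spin_unlock_bh(&c->lock);
}

static void compl_init(struct compl_ctx *c)
{
	skb_queue_head_init(&c->q);
	spin_lock_init(&c->lock);
	tasklet_init(&c->task, compl_task, (unsigned long)c);
}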

@ -109,6 +109,14 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
return -ENOMEM;
}
htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
sizeof(struct ath10k_htt_txbuf), 4, 0);
if (!htt->tx_pool) {
kfree(htt->used_msdu_ids);
kfree(htt->pending_tx);
return -ENOMEM;
}
return 0;
}
@ -117,9 +125,7 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
struct htt_tx_done tx_done = {0};
int msdu_id;
/* No locks needed. Called after communication with the device has
* been stopped. */
spin_lock_bh(&htt->tx_lock);
for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
if (!test_bit(msdu_id, htt->used_msdu_ids))
continue;
@ -132,6 +138,7 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
ath10k_txrx_tx_unref(htt, &tx_done);
}
spin_unlock_bh(&htt->tx_lock);
}
void ath10k_htt_tx_detach(struct ath10k_htt *htt)
@ -139,6 +146,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
ath10k_htt_tx_cleanup_pending(htt);
kfree(htt->pending_tx);
kfree(htt->used_msdu_ids);
dma_pool_destroy(htt->tx_pool);
return;
}
@ -334,7 +342,9 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
goto err_free_msdu_id;
}
res = ath10k_skb_map(dev, msdu);
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
DMA_TO_DEVICE);
res = dma_mapping_error(dev, skb_cb->paddr);
if (res)
goto err_free_txdesc;
@ -348,8 +358,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
memcpy(cmd->mgmt_tx.hdr, msdu->data,
min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
skb_cb->htt.frag_len = 0;
skb_cb->htt.pad_len = 0;
skb_cb->htt.txbuf = NULL;
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
@ -358,7 +367,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
return 0;
err_unmap_msdu:
ath10k_skb_unmap(dev, msdu);
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
dev_kfree_skb_any(txdesc);
err_free_msdu_id:
@ -375,19 +384,19 @@ err:
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct device *dev = htt->ar->dev;
struct htt_cmd *cmd;
struct htt_data_tx_desc_frag *tx_frags;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct sk_buff *txdesc = NULL;
bool use_frags;
u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
u8 tid;
int prefetch_len, desc_len;
int msdu_id = -1;
struct ath10k_hif_sg_item sg_items[2];
struct htt_data_tx_desc_frag *frags;
u8 vdev_id = skb_cb->vdev_id;
u8 tid = skb_cb->htt.tid;
int prefetch_len;
int res;
u8 flags0;
u16 flags1;
u8 flags0 = 0;
u16 msdu_id, flags1 = 0;
dma_addr_t paddr;
u32 frags_paddr;
bool use_frags;
res = ath10k_htt_tx_inc_pending(htt);
if (res)
@ -406,114 +415,120 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
txdesc = ath10k_htc_alloc_skb(desc_len);
if (!txdesc) {
res = -ENOMEM;
goto err_free_msdu_id;
}
/* Since HTT 3.0 there is no separate mgmt tx command. However in case
* of mgmt tx using TX_FRM there is not tx fragment list. Instead of tx
* fragment list host driver specifies directly frame pointer. */
use_frags = htt->target_version_major < 3 ||
!ieee80211_is_mgmt(hdr->frame_control);
if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
ath10k_warn("htt alignment check failed. dropping packet.\n");
res = -EIO;
goto err_free_txdesc;
}
skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
&paddr);
if (!skb_cb->htt.txbuf)
goto err_free_msdu_id;
skb_cb->htt.txbuf_paddr = paddr;
if (use_frags) {
skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
skb_cb->htt.pad_len = (unsigned long)msdu->data -
round_down((unsigned long)msdu->data, 4);
skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
} else {
skb_cb->htt.frag_len = 0;
skb_cb->htt.pad_len = 0;
}
res = ath10k_skb_map(dev, msdu);
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
DMA_TO_DEVICE);
res = dma_mapping_error(dev, skb_cb->paddr);
if (res)
goto err_pull_txfrag;
goto err_free_txbuf;
if (use_frags) {
dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
DMA_TO_DEVICE);
if (likely(use_frags)) {
frags = skb_cb->htt.txbuf->frags;
/* tx fragment list must be terminated with zero-entry */
tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
skb_cb->htt.frag_len +
skb_cb->htt.pad_len);
tx_frags[0].len = __cpu_to_le32(msdu->len -
skb_cb->htt.frag_len -
skb_cb->htt.pad_len);
tx_frags[1].paddr = __cpu_to_le32(0);
tx_frags[1].len = __cpu_to_le32(0);
frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
frags[0].len = __cpu_to_le32(msdu->len);
frags[1].paddr = 0;
frags[1].len = 0;
dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
DMA_TO_DEVICE);
}
ath10k_dbg(ATH10K_DBG_HTT, "tx-msdu 0x%llx\n",
(unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "tx-msdu: ",
msdu->data, msdu->len);
skb_put(txdesc, desc_len);
cmd = (struct htt_cmd *)txdesc->data;
tid = ATH10K_SKB_CB(msdu)->htt.tid;
ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);
flags0 = 0;
if (!ieee80211_has_protected(hdr->frame_control))
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
if (use_frags)
flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
else
frags_paddr = skb_cb->htt.txbuf_paddr;
} else {
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
flags1 = 0;
frags_paddr = skb_cb->paddr;
}
/* Normally all commands go through HTC which manages tx credits for
* each endpoint and notifies when tx is completed.
*
* HTT endpoint is creditless so there's no need to care about HTC
* flags. In that case it is trivial to fill the HTC header here.
*
* MSDU transmission is considered completed upon HTT event. This
* implies no relevant resources can be freed until after the event is
* received. That's why HTC tx completion handler itself is ignored by
* setting NULL to transfer_context for all sg items.
*
* There is simply no point in pushing HTT TX_FRM through HTC tx path
* as it's a waste of resources. By bypassing HTC it is possible to
* avoid extra memory allocations, compress data structures and thus
* improve performance. */
skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
sizeof(skb_cb->htt.txbuf->cmd_hdr) +
sizeof(skb_cb->htt.txbuf->cmd_tx) +
prefetch_len);
skb_cb->htt.txbuf->htc_hdr.flags = 0;
if (!ieee80211_has_protected(hdr->frame_control))
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
cmd->data_tx.flags0 = flags0;
cmd->data_tx.flags1 = __cpu_to_le16(flags1);
cmd->data_tx.len = __cpu_to_le16(msdu->len -
skb_cb->htt.frag_len -
skb_cb->htt.pad_len);
cmd->data_tx.id = __cpu_to_le16(msdu_id);
cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);
ath10k_dbg(ATH10K_DBG_HTT,
"htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
flags0, flags1, msdu->len, msdu_id, frags_paddr,
(u32)skb_cb->paddr, vdev_id, tid);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
msdu->data, msdu->len);
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
sg_items[0].transfer_id = 0;
sg_items[0].transfer_context = NULL;
sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
sizeof(skb_cb->htt.txbuf->frags);
sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
sizeof(skb_cb->htt.txbuf->cmd_hdr) +
sizeof(skb_cb->htt.txbuf->cmd_tx);
sg_items[1].transfer_id = 0;
sg_items[1].transfer_context = NULL;
sg_items[1].vaddr = msdu->data;
sg_items[1].paddr = skb_cb->paddr;
sg_items[1].len = prefetch_len;
res = ath10k_hif_tx_sg(htt->ar,
htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
sg_items, ARRAY_SIZE(sg_items));
if (res)
goto err_unmap_msdu;
return 0;
err_unmap_msdu:
ath10k_skb_unmap(dev, msdu);
err_pull_txfrag:
skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
err_free_txdesc:
dev_kfree_skb_any(txdesc);
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
dma_pool_free(htt->tx_pool,
skb_cb->htt.txbuf,
skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;

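The htt_tx rework above swaps a per-packet HTC descriptor skb for a dma_pool of fixed-size ath10k_htt_txbuf objects: dma_pool_alloc() returns the CPU pointer and the device-visible address in one call, so there is no per-descriptor dma_map_single() anymore. The pool lifecycle in isolation, with hypothetical size and alignment values:

#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/errno.h>

static int txbuf_pool_demo(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t paddr;
	void *buf;

	/* fixed object size (64 here), 4-byte alignment, no boundary rule */
	pool = dma_pool_create("txbuf pool", dev, 64, 4, 0);
	if (!pool)
		return -ENOMEM;

	/* CPU pointer and bus address come back together */
	buf = dma_pool_alloc(pool, GFP_KERNEL, &paddr);
	if (!buf) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... fill buf, hand paddr to the hardware ... */

	dma_pool_free(pool, buf, paddr);
	dma_pool_destroy(pool);
	return 0;
}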

@ -323,13 +323,15 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
if (ret) {
ath10k_warn("Failed to create wmi peer: %i\n", ret);
ath10k_warn("Failed to create wmi peer %pM on vdev %i: %i\n",
addr, vdev_id, ret);
return ret;
}
ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
if (ret) {
ath10k_warn("Failed to wait for created wmi peer: %i\n", ret);
ath10k_warn("Failed to wait for created wmi peer %pM on vdev %i: %i\n",
addr, vdev_id, ret);
return ret;
}
spin_lock_bh(&ar->data_lock);
@ -349,7 +351,8 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
ret = ath10k_wmi_pdev_set_param(ar, param,
ATH10K_KICKOUT_THRESHOLD);
if (ret) {
ath10k_warn("Failed to set kickout threshold: %d\n", ret);
ath10k_warn("Failed to set kickout threshold on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@ -357,8 +360,8 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
ATH10K_KEEPALIVE_MIN_IDLE);
if (ret) {
ath10k_warn("Failed to set keepalive minimum idle time : %d\n",
ret);
ath10k_warn("Failed to set keepalive minimum idle time on vdev %i : %d\n",
arvif->vdev_id, ret);
return ret;
}
@ -366,8 +369,8 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
ATH10K_KEEPALIVE_MAX_IDLE);
if (ret) {
ath10k_warn("Failed to set keepalive maximum idle time: %d\n",
ret);
ath10k_warn("Failed to set keepalive maximum idle time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@ -375,8 +378,8 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
if (ret) {
ath10k_warn("Failed to set keepalive maximum unresponsive time: %d\n",
ret);
ath10k_warn("Failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@ -529,13 +532,15 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
ath10k_warn("WMI vdev start failed: ret %d\n", ret);
ath10k_warn("WMI vdev %i start failed: ret %d\n",
arg.vdev_id, ret);
return ret;
}
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
ath10k_warn("vdev setup failed %d\n", ret);
ath10k_warn("vdev %i setup failed %d\n",
arg.vdev_id, ret);
return ret;
}
@ -553,13 +558,15 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
if (ret) {
ath10k_warn("WMI vdev stop failed: ret %d\n", ret);
ath10k_warn("WMI vdev %i stop failed: ret %d\n",
arvif->vdev_id, ret);
return ret;
}
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
ath10k_warn("vdev setup failed %d\n", ret);
ath10k_warn("vdev %i setup sync failed %d\n",
arvif->vdev_id, ret);
return ret;
}
@ -597,19 +604,22 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
ath10k_warn("Monitor vdev start failed: ret %d\n", ret);
ath10k_warn("Monitor vdev %i start failed: ret %d\n",
vdev_id, ret);
return ret;
}
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
ath10k_warn("Monitor vdev setup failed %d\n", ret);
ath10k_warn("Monitor vdev %i setup failed %d\n",
vdev_id, ret);
return ret;
}
ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
if (ret) {
ath10k_warn("Monitor vdev up failed: %d\n", ret);
ath10k_warn("Monitor vdev %i up failed: %d\n",
vdev_id, ret);
goto vdev_stop;
}
@ -621,7 +631,8 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
vdev_stop:
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
ath10k_warn("Monitor vdev stop failed: %d\n", ret);
ath10k_warn("Monitor vdev %i stop failed: %d\n",
ar->monitor_vdev_id, ret);
return ret;
}
@ -644,15 +655,18 @@ static int ath10k_monitor_stop(struct ath10k *ar)
ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
if (ret)
ath10k_warn("Monitor vdev down failed: %d\n", ret);
ath10k_warn("Monitor vdev %i down failed: %d\n",
ar->monitor_vdev_id, ret);
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
ath10k_warn("Monitor vdev stop failed: %d\n", ret);
ath10k_warn("Monitor vdev %i stop failed: %d\n",
ar->monitor_vdev_id, ret);
ret = ath10k_vdev_setup_sync(ar);
if (ret)
ath10k_warn("Monitor_down sync failed: %d\n", ret);
ath10k_warn("Monitor_down sync failed, vdev %i: %d\n",
ar->monitor_vdev_id, ret);
ar->monitor_enabled = false;
return ret;
@ -682,7 +696,8 @@ static int ath10k_monitor_create(struct ath10k *ar)
WMI_VDEV_TYPE_MONITOR,
0, ar->mac_addr);
if (ret) {
ath10k_warn("WMI vdev monitor create failed: ret %d\n", ret);
ath10k_warn("WMI vdev %i monitor create failed: ret %d\n",
ar->monitor_vdev_id, ret);
goto vdev_fail;
}
@ -711,7 +726,8 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
if (ret) {
ath10k_warn("WMI vdev monitor delete failed: %d\n", ret);
ath10k_warn("WMI vdev %i monitor delete failed: %d\n",
ar->monitor_vdev_id, ret);
return ret;
}
@ -839,7 +855,9 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
spin_lock_bh(&arvif->ar->data_lock);
if (arvif->beacon) {
ath10k_skb_unmap(arvif->ar->dev, arvif->beacon);
dma_unmap_single(arvif->ar->dev,
ATH10K_SKB_CB(arvif->beacon)->paddr,
arvif->beacon->len, DMA_TO_DEVICE);
dev_kfree_skb_any(arvif->beacon);
arvif->beacon = NULL;
@ -862,8 +880,8 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid);
if (ret) {
ath10k_warn("Failed to bring up VDEV: %d\n",
arvif->vdev_id);
ath10k_warn("Failed to bring up vdev %d: %i\n",
arvif->vdev_id, ret);
ath10k_vdev_stop(arvif);
return;
}
@ -943,8 +961,8 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
conf->dynamic_ps_timeout);
if (ret) {
ath10k_warn("Failed to set inactivity time for VDEV: %d\n",
arvif->vdev_id);
ath10k_warn("Failed to set inactivity time for vdev %d: %i\n",
arvif->vdev_id, ret);
return ret;
}
} else {
@ -1196,8 +1214,8 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
WMI_AP_PS_PEER_PARAM_UAPSD,
uapsd);
if (ret) {
ath10k_warn("failed to set ap ps peer param uapsd: %d\n",
ret);
ath10k_warn("failed to set ap ps peer param uapsd for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@ -1206,8 +1224,8 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
WMI_AP_PS_PEER_PARAM_MAX_SP,
max_sp);
if (ret) {
ath10k_warn("failed to set ap ps peer param max sp: %d\n",
ret);
ath10k_warn("failed to set ap ps peer param max sp for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@ -1218,8 +1236,8 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 10);
if (ret) {
ath10k_warn("failed to set ap ps peer param ageout time: %d\n",
ret);
ath10k_warn("failed to set ap ps peer param ageout time for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
@ -1411,8 +1429,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (!ap_sta) {
ath10k_warn("Failed to find station entry for %pM\n",
bss_conf->bssid);
ath10k_warn("Failed to find station entry for %pM, vdev %i\n",
bss_conf->bssid, arvif->vdev_id);
rcu_read_unlock();
return;
}
@ -1424,8 +1442,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
bss_conf, &peer_arg);
if (ret) {
ath10k_warn("Peer assoc prepare failed for %pM\n: %d",
bss_conf->bssid, ret);
ath10k_warn("Peer assoc prepare failed for %pM vdev %i\n: %d",
bss_conf->bssid, arvif->vdev_id, ret);
rcu_read_unlock();
return;
}
@ -1434,14 +1452,15 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
ath10k_warn("Peer assoc failed for %pM\n: %d",
bss_conf->bssid, ret);
ath10k_warn("Peer assoc failed for %pM vdev %i\n: %d",
bss_conf->bssid, arvif->vdev_id, ret);
return;
}
ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
if (ret) {
ath10k_warn("failed to setup peer SMPS: %d\n", ret);
ath10k_warn("failed to setup peer SMPS for vdev %i: %d\n",
arvif->vdev_id, ret);
return;
}
@ -1514,34 +1533,35 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
if (ret) {
ath10k_warn("WMI peer assoc prepare failed for %pM\n",
sta->addr);
ath10k_warn("WMI peer assoc prepare failed for %pM vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
ath10k_warn("Peer assoc failed for STA %pM\n: %d",
sta->addr, ret);
ath10k_warn("Peer assoc failed for STA %pM vdev %i: %d\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
if (ret) {
ath10k_warn("failed to setup peer SMPS: %d\n", ret);
ath10k_warn("failed to setup peer SMPS for vdev: %d\n", ret);
return ret;
}
ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
if (ret) {
ath10k_warn("could not install peer wep keys (%d)\n", ret);
ath10k_warn("could not install peer wep keys for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
if (ret) {
ath10k_warn("could not set qos params for STA %pM, %d\n",
sta->addr, ret);
ath10k_warn("could not set qos params for STA %pM for vdev %i: %d\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
@ -1557,7 +1577,8 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
ret = ath10k_clear_peer_keys(arvif, sta->addr);
if (ret) {
ath10k_warn("could not clear all peer wep keys (%d)\n", ret);
ath10k_warn("could not clear all peer wep keys for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@ -2524,7 +2545,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
arvif->vdev_subtype, vif->addr);
if (ret) {
ath10k_warn("WMI vdev create failed: ret %d\n", ret);
ath10k_warn("WMI vdev %i create failed: ret %d\n",
arvif->vdev_id, ret);
goto err;
}
@ -2535,7 +2557,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
arvif->def_wep_key_idx);
if (ret) {
ath10k_warn("Failed to set default keyid: %d\n", ret);
ath10k_warn("Failed to set vdev %i default keyid: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
@ -2544,21 +2567,23 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ATH10K_HW_TXRX_NATIVE_WIFI);
/* 10.X firmware does not support this VDEV parameter. Do not warn */
if (ret && ret != -EOPNOTSUPP) {
ath10k_warn("Failed to set TX encap: %d\n", ret);
ath10k_warn("Failed to set vdev %i TX encap: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
if (ret) {
ath10k_warn("Failed to create peer for AP: %d\n", ret);
ath10k_warn("Failed to create vdev %i peer for AP: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
ret = ath10k_mac_set_kickout(arvif);
if (ret) {
ath10k_warn("Failed to set kickout parameters: %d\n",
ret);
ath10k_warn("Failed to set vdev %i kickout parameters: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
}
@ -2569,7 +2594,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
ath10k_warn("Failed to set RX wake policy: %d\n", ret);
ath10k_warn("Failed to set vdev %i RX wake policy: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
@ -2578,7 +2604,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
ath10k_warn("Failed to set TX wake thresh: %d\n", ret);
ath10k_warn("Failed to set vdev %i TX wake thresh: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
@ -2587,7 +2614,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
ath10k_warn("Failed to set vdev %i PSPOLL count: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
}
@ -2651,17 +2679,19 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
if (ret)
ath10k_warn("Failed to remove peer for AP: %d\n", ret);
ath10k_warn("Failed to remove peer for AP vdev %i: %d\n",
arvif->vdev_id, ret);
kfree(arvif->u.ap.noa_data);
}
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev delete %d (remove interface)\n",
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
arvif->vdev_id);
ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
if (ret)
ath10k_warn("WMI vdev delete failed: %d\n", ret);
ath10k_warn("WMI vdev %i delete failed: %d\n",
arvif->vdev_id, ret);
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
ar->monitor_present = false;
@ -2750,8 +2780,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->vdev_id, arvif->beacon_interval);
if (ret)
ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
arvif->vdev_id);
ath10k_warn("Failed to set beacon interval for vdev %d: %i\n",
arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_BEACON) {
@ -2763,8 +2793,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
WMI_BEACON_STAGGERED_MODE);
if (ret)
ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
arvif->vdev_id);
ath10k_warn("Failed to set beacon mode for vdev %d: %i\n",
arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_BEACON_INFO) {
@ -2778,8 +2808,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
arvif->dtim_period);
if (ret)
ath10k_warn("Failed to set dtim period for VDEV: %d\n",
arvif->vdev_id);
ath10k_warn("Failed to set dtim period for vdev %d: %i\n",
arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_SSID &&
@ -2799,7 +2829,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_peer_create(ar, arvif->vdev_id,
info->bssid);
if (ret)
ath10k_warn("Failed to add peer %pM for vdev %d when changin bssid: %i\n",
ath10k_warn("Failed to add peer %pM for vdev %d when changing bssid: %i\n",
info->bssid, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION) {
@ -2815,8 +2845,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_vdev_start(arvif);
if (ret) {
ath10k_warn("failed to start vdev: %d\n",
ret);
ath10k_warn("failed to start vdev %i: %d\n",
arvif->vdev_id, ret);
goto exit;
}
@ -2851,8 +2881,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
cts_prot);
if (ret)
ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
arvif->vdev_id);
ath10k_warn("Failed to set CTS prot for vdev %d: %d\n",
arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@ -2870,8 +2900,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
slottime);
if (ret)
ath10k_warn("Failed to set erp slot for VDEV: %d\n",
arvif->vdev_id);
ath10k_warn("Failed to set erp slot for vdev %d: %i\n",
arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@ -2889,8 +2919,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
preamble);
if (ret)
ath10k_warn("Failed to set preamble for VDEV: %d\n",
arvif->vdev_id);
ath10k_warn("Failed to set preamble for vdev %d: %i\n",
arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_ASSOC) {
@ -3021,8 +3051,8 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
key->keyidx);
if (ret)
ath10k_warn("failed to set group key as default key: %d\n",
ret);
ath10k_warn("failed to set vdev %i group key as default key: %d\n",
arvif->vdev_id, ret);
}
static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@ -3082,7 +3112,8 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = ath10k_install_key(arvif, key, cmd, peer_addr);
if (ret) {
ath10k_warn("ath10k_install_key failed (%d)\n", ret);
ath10k_warn("key installation failed for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret);
goto exit;
}
@ -3179,6 +3210,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
int max_num_peers;
int ret = 0;
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
memset(arsta, 0, sizeof(*arsta));
arsta->arvif = arvif;
INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
}
/* cancel must be done outside the mutex to avoid deadlock */
if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST))
@ -3208,10 +3246,6 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
"mac vdev %d peer create %pM (new sta) num_peers %d\n",
arvif->vdev_id, sta->addr, ar->num_peers);
memset(arsta, 0, sizeof(*arsta));
arsta->arvif = arvif;
INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn("Failed to add peer %pM for vdev %d when adding a new sta: %i\n",
@ -3226,8 +3260,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
arvif->vdev_id, sta->addr);
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
ath10k_warn("Failed to delete peer %pM for vdev %d: %i\n",
sta->addr, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION)
ath10k_bss_disassoc(hw, vif);
@ -3243,8 +3277,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ret = ath10k_station_assoc(ar, arvif, sta);
if (ret)
ath10k_warn("Failed to associate station: %pM\n",
sta->addr);
ath10k_warn("Failed to associate station %pM for vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
@ -3257,8 +3291,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ret = ath10k_station_disassoc(ar, arvif, sta);
if (ret)
ath10k_warn("Failed to disassociate station: %pM\n",
sta->addr);
ath10k_warn("Failed to disassociate station: %pM vdev %i ret %i\n",
sta->addr, arvif->vdev_id, ret);
}
exit:
mutex_unlock(&ar->conf_mutex);
@ -3539,7 +3573,8 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
}), ATH10K_FLUSH_TIMEOUT_HZ);
if (ret <= 0 || skip)
ath10k_warn("tx not flushed\n");
ath10k_warn("tx not flushed (skip %i ar-state %i): %i\n",
skip, ar->state, ret);
skip:
mutex_unlock(&ar->conf_mutex);
@ -3905,7 +3940,8 @@ static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask,
static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
u8 fixed_rate,
u8 fixed_nss)
u8 fixed_nss,
u8 force_sgi)
{
struct ath10k *ar = arvif->ar;
u32 vdev_param;
@ -3914,12 +3950,16 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
mutex_lock(&ar->conf_mutex);
if (arvif->fixed_rate == fixed_rate &&
arvif->fixed_nss == fixed_nss)
arvif->fixed_nss == fixed_nss &&
arvif->force_sgi == force_sgi)
goto exit;
if (fixed_rate == WMI_FIXED_RATE_NONE)
ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
if (force_sgi)
ath10k_dbg(ATH10K_DBG_MAC, "mac force sgi\n");
vdev_param = ar->wmi.vdev_param->fixed_rate;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
vdev_param, fixed_rate);
@ -3945,6 +3985,19 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
arvif->fixed_nss = fixed_nss;
vdev_param = ar->wmi.vdev_param->sgi;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
force_sgi);
if (ret) {
ath10k_warn("Could not set sgi param %d: %d\n",
force_sgi, ret);
ret = -EINVAL;
goto exit;
}
arvif->force_sgi = force_sgi;
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
@ -3959,6 +4012,11 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
u8 fixed_rate = WMI_FIXED_RATE_NONE;
u8 fixed_nss = ar->num_rf_chains;
u8 force_sgi;
force_sgi = mask->control[band].gi;
if (force_sgi == NL80211_TXRATE_FORCE_LGI)
return -EINVAL;
if (!ath10k_default_bitrate_mask(ar, band, mask)) {
if (!ath10k_get_fixed_rate_nss(mask, band,
@ -3967,7 +4025,13 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
return -EINVAL;
}
return ath10k_set_fixed_rate_param(arvif, fixed_rate, fixed_nss);
if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
ath10k_warn("Could not force SGI usage for default rate settings\n");
return -EINVAL;
}
return ath10k_set_fixed_rate_param(arvif, fixed_rate,
fixed_nss, force_sgi);
}
static void ath10k_channel_switch_beacon(struct ieee80211_hw *hw,
@ -4060,6 +4124,16 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
ieee80211_queue_work(hw, &arsta->update_wk);
}
static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
/*
* FIXME: Return 0 for time being. Need to figure out whether FW
* has the API to fetch 64-bit local TSF
*/
return 0;
}
static const struct ieee80211_ops ath10k_ops = {
.tx = ath10k_tx,
.start = ath10k_start,
@ -4085,6 +4159,7 @@ static const struct ieee80211_ops ath10k_ops = {
.set_bitrate_mask = ath10k_set_bitrate_mask,
.channel_switch_beacon = ath10k_channel_switch_beacon,
.sta_rc_update = ath10k_sta_rc_update,
.get_tsf = ath10k_get_tsf,
#ifdef CONFIG_PM
.suspend = ath10k_suspend,
.resume = ath10k_resume,
@ -4361,7 +4436,7 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
ath10k_get_arvif_iter,
&arvif_iter);
if (!arvif_iter.arvif) {
ath10k_warn("No VIF found for VDEV: %d\n", vdev_id);
ath10k_warn("No VIF found for vdev %d\n", vdev_id);
return NULL;
}
@ -4442,7 +4517,8 @@ int ath10k_mac_register(struct ath10k *ar)
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_SUPPORTS_STATIC_SMPS |
IEEE80211_HW_WANT_MONITOR_VIF |
IEEE80211_HW_AP_LINK_PS;
IEEE80211_HW_AP_LINK_PS |
IEEE80211_HW_SPECTRUM_MGMT;
/* MSDU can have HTT TX fragment pushed in front. The additional 4
* bytes is used for padding/alignment if necessary. */
@ -4500,7 +4576,7 @@ int ath10k_mac_register(struct ath10k *ar)
ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
ath10k_reg_notifier);
if (ret) {
ath10k_err("Regulatory initialization failed\n");
ath10k_err("Regulatory initialization failed: %i\n", ret);
goto err_free;
}

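A note on the bitrate-mask handling above: forcing a long GI is rejected outright, and forcing a short GI is only accepted together with a fixed rate. A minimal standalone sketch of that gating, using stand-in constants rather than the real nl80211/WMI definitions:

/* Stand-in constants; the driver itself uses NL80211_TXRATE_* and
 * WMI_FIXED_RATE_NONE. */
enum gi_mode { GI_DEFAULT, GI_FORCE_SGI, GI_FORCE_LGI };
#define RATE_NONE 0xff

static int check_gi_request(unsigned char fixed_rate, enum gi_mode gi)
{
	if (gi == GI_FORCE_LGI)
		return -1;	/* forcing long GI is not supported */

	if (fixed_rate == RATE_NONE && gi == GI_FORCE_SGI)
		return -1;	/* SGI cannot be forced with default rates */

	return 0;
}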
View File

@ -58,12 +58,10 @@ static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
u32 *data);
static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
@ -74,7 +72,6 @@ static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
struct ath10k_ce_pipe *rx_pipe,
struct bmi_xfer *xfer);
static void ath10k_pci_cleanup_ce(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
@ -679,34 +676,12 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
}
}
/*
* FIXME: Handle OOM properly.
*/
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
struct ath10k_pci_compl *compl = NULL;
spin_lock_bh(&pipe_info->pipe_lock);
if (list_empty(&pipe_info->compl_free)) {
ath10k_warn("Completion buffers are full\n");
goto exit;
}
compl = list_first_entry(&pipe_info->compl_free,
struct ath10k_pci_compl, list);
list_del(&compl->list);
exit:
spin_unlock_bh(&pipe_info->pipe_lock);
return compl;
}
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_pci_compl *compl;
struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
void *transfer_context;
u32 ce_data;
unsigned int nbytes;
@ -715,27 +690,12 @@ static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
&ce_data, &nbytes,
&transfer_id) == 0) {
compl = get_free_compl(pipe_info);
if (!compl)
break;
/* no need to call tx completion for NULL pointers */
if (transfer_context == NULL)
continue;
compl->state = ATH10K_PCI_COMPL_SEND;
compl->ce_state = ce_state;
compl->pipe_info = pipe_info;
compl->skb = transfer_context;
compl->nbytes = nbytes;
compl->transfer_id = transfer_id;
compl->flags = 0;
/*
* Add the completion to the processing queue.
*/
spin_lock_bh(&ar_pci->compl_lock);
list_add_tail(&compl->list, &ar_pci->compl_process);
spin_unlock_bh(&ar_pci->compl_lock);
cb->tx_completion(ar, transfer_context, transfer_id);
}
ath10k_pci_process_ce(ar);
}
/* Called by lower (CE) layer when data is received from the Target. */
@ -744,77 +704,100 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_pci_compl *compl;
struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
struct sk_buff *skb;
void *transfer_context;
u32 ce_data;
unsigned int nbytes;
unsigned int nbytes, max_nbytes;
unsigned int transfer_id;
unsigned int flags;
int err;
while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
&ce_data, &nbytes, &transfer_id,
&flags) == 0) {
compl = get_free_compl(pipe_info);
if (!compl)
break;
compl->state = ATH10K_PCI_COMPL_RECV;
compl->ce_state = ce_state;
compl->pipe_info = pipe_info;
compl->skb = transfer_context;
compl->nbytes = nbytes;
compl->transfer_id = transfer_id;
compl->flags = flags;
err = ath10k_pci_post_rx_pipe(pipe_info, 1);
if (unlikely(err)) {
/* FIXME: retry */
ath10k_warn("failed to replenish CE rx ring %d: %d\n",
pipe_info->pipe_num, err);
}
skb = transfer_context;
max_nbytes = skb->len + skb_tailroom(skb);
dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
/*
* Add the completion to the processing queue.
*/
spin_lock_bh(&ar_pci->compl_lock);
list_add_tail(&compl->list, &ar_pci->compl_process);
spin_unlock_bh(&ar_pci->compl_lock);
}
max_nbytes, DMA_FROM_DEVICE);
ath10k_pci_process_ce(ar);
if (unlikely(max_nbytes < nbytes)) {
ath10k_warn("rxed more than expected (nbytes %d, max %d)",
nbytes, max_nbytes);
dev_kfree_skb_any(skb);
continue;
}
skb_put(skb, nbytes);
cb->rx_completion(ar, skb, pipe_info->pipe_num);
}
}
/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
unsigned int transfer_id,
unsigned int bytes, struct sk_buff *nbuf)
static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
struct ath10k_hif_sg_item *items, int n_items)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
unsigned int len;
u32 flags = 0;
int ret;
struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
unsigned int write_index = src_ring->write_index;
int err, i;
len = min(bytes, nbuf->len);
bytes -= len;
spin_lock_bh(&ar_pci->ce_lock);
if (len & 3)
ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
if (unlikely(CE_RING_DELTA(nentries_mask,
write_index, sw_index - 1) < n_items)) {
err = -ENOBUFS;
goto unlock;
}
for (i = 0; i < n_items - 1; i++) {
ath10k_dbg(ATH10K_DBG_PCI,
"pci tx item %d paddr 0x%08x len %d n_items %d\n",
i, items[i].paddr, items[i].len, n_items);
ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
items[i].vaddr, items[i].len);
err = ath10k_ce_send_nolock(ce_pipe,
items[i].transfer_context,
items[i].paddr,
items[i].len,
items[i].transfer_id,
CE_SEND_FLAG_GATHER);
if (err)
goto unlock;
}
/* `i` is equal to `n_items -1` after for() */
ath10k_dbg(ATH10K_DBG_PCI,
"pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
nbuf->data, (unsigned long long) skb_cb->paddr,
nbuf->len, len);
ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
"ath10k tx: data: ",
nbuf->data, nbuf->len);
"pci tx item %d paddr 0x%08x len %d n_items %d\n",
i, items[i].paddr, items[i].len, n_items);
ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
items[i].vaddr, items[i].len);
ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
flags);
if (ret)
ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);
err = ath10k_ce_send_nolock(ce_pipe,
items[i].transfer_context,
items[i].paddr,
items[i].len,
items[i].transfer_id,
0);
if (err)
goto unlock;
return ret;
err = 0;
unlock:
spin_unlock_bh(&ar_pci->ce_lock);
return err;
}
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
@ -903,52 +886,6 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
sizeof(ar_pci->msg_callbacks_current));
}
static int ath10k_pci_alloc_compl(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
const struct ce_attr *attr;
struct ath10k_pci_pipe *pipe_info;
struct ath10k_pci_compl *compl;
int i, pipe_num, completions;
spin_lock_init(&ar_pci->compl_lock);
INIT_LIST_HEAD(&ar_pci->compl_process);
for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
spin_lock_init(&pipe_info->pipe_lock);
INIT_LIST_HEAD(&pipe_info->compl_free);
/* Handle Diagnostic CE specially */
if (pipe_info->ce_hdl == ar_pci->ce_diag)
continue;
attr = &host_ce_config_wlan[pipe_num];
completions = 0;
if (attr->src_nentries)
completions += attr->src_nentries;
if (attr->dest_nentries)
completions += attr->dest_nentries;
for (i = 0; i < completions; i++) {
compl = kmalloc(sizeof(*compl), GFP_KERNEL);
if (!compl) {
ath10k_warn("No memory for completion state\n");
ath10k_pci_cleanup_ce(ar);
return -ENOMEM;
}
compl->state = ATH10K_PCI_COMPL_FREE;
list_add_tail(&compl->list, &pipe_info->compl_free);
}
}
return 0;
}
static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@ -993,147 +930,6 @@ static void ath10k_pci_kill_tasklet(struct ath10k *ar)
tasklet_kill(&ar_pci->pipe_info[i].intr);
}
static void ath10k_pci_stop_ce(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_compl *compl;
struct sk_buff *skb;
/* Mark pending completions as aborted, so that upper layers free up
* their associated resources */
spin_lock_bh(&ar_pci->compl_lock);
list_for_each_entry(compl, &ar_pci->compl_process, list) {
skb = compl->skb;
ATH10K_SKB_CB(skb)->is_aborted = true;
}
spin_unlock_bh(&ar_pci->compl_lock);
}
static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_compl *compl, *tmp;
struct ath10k_pci_pipe *pipe_info;
struct sk_buff *netbuf;
int pipe_num;
/* Free pending completions. */
spin_lock_bh(&ar_pci->compl_lock);
if (!list_empty(&ar_pci->compl_process))
ath10k_warn("pending completions still present! possible memory leaks.\n");
list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
list_del(&compl->list);
netbuf = compl->skb;
dev_kfree_skb_any(netbuf);
kfree(compl);
}
spin_unlock_bh(&ar_pci->compl_lock);
/* Free unused completions for each pipe. */
for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
spin_lock_bh(&pipe_info->pipe_lock);
list_for_each_entry_safe(compl, tmp,
&pipe_info->compl_free, list) {
list_del(&compl->list);
kfree(compl);
}
spin_unlock_bh(&pipe_info->pipe_lock);
}
}
static void ath10k_pci_process_ce(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ar->hif.priv;
struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
struct ath10k_pci_compl *compl;
struct sk_buff *skb;
unsigned int nbytes;
int ret, send_done = 0;
/* Upper layers aren't ready to handle tx/rx completions in parallel so
* we must serialize all completion processing. */
spin_lock_bh(&ar_pci->compl_lock);
if (ar_pci->compl_processing) {
spin_unlock_bh(&ar_pci->compl_lock);
return;
}
ar_pci->compl_processing = true;
spin_unlock_bh(&ar_pci->compl_lock);
for (;;) {
spin_lock_bh(&ar_pci->compl_lock);
if (list_empty(&ar_pci->compl_process)) {
spin_unlock_bh(&ar_pci->compl_lock);
break;
}
compl = list_first_entry(&ar_pci->compl_process,
struct ath10k_pci_compl, list);
list_del(&compl->list);
spin_unlock_bh(&ar_pci->compl_lock);
switch (compl->state) {
case ATH10K_PCI_COMPL_SEND:
cb->tx_completion(ar,
compl->skb,
compl->transfer_id);
send_done = 1;
break;
case ATH10K_PCI_COMPL_RECV:
ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
if (ret) {
ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
compl->pipe_info->pipe_num, ret);
break;
}
skb = compl->skb;
nbytes = compl->nbytes;
ath10k_dbg(ATH10K_DBG_PCI,
"ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
skb, nbytes);
ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
"ath10k rx: ", skb->data, nbytes);
if (skb->len + skb_tailroom(skb) >= nbytes) {
skb_trim(skb, 0);
skb_put(skb, nbytes);
cb->rx_completion(ar, skb,
compl->pipe_info->pipe_num);
} else {
ath10k_warn("rxed more than expected (nbytes %d, max %d)",
nbytes,
skb->len + skb_tailroom(skb));
}
break;
case ATH10K_PCI_COMPL_FREE:
ath10k_warn("free completion cannot be processed\n");
break;
default:
ath10k_warn("invalid completion state (%d)\n",
compl->state);
break;
}
compl->state = ATH10K_PCI_COMPL_FREE;
/*
* Add completion back to the pipe's free list.
*/
spin_lock_bh(&compl->pipe_info->pipe_lock);
list_add_tail(&compl->list, &compl->pipe_info->compl_free);
spin_unlock_bh(&compl->pipe_info->pipe_lock);
}
spin_lock_bh(&ar_pci->compl_lock);
ar_pci->compl_processing = false;
spin_unlock_bh(&ar_pci->compl_lock);
}
/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
u16 service_id, u8 *ul_pipe,
@ -1305,17 +1101,11 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
ath10k_pci_free_early_irq(ar);
ath10k_pci_kill_tasklet(ar);
ret = ath10k_pci_alloc_compl(ar);
if (ret) {
ath10k_warn("failed to allocate CE completions: %d\n", ret);
goto err_early_irq;
}
ret = ath10k_pci_request_irq(ar);
if (ret) {
ath10k_warn("failed to post RX buffers for all pipes: %d\n",
ret);
goto err_free_compl;
goto err_early_irq;
}
ret = ath10k_pci_setup_ce_irq(ar);
@ -1339,10 +1129,6 @@ err_stop:
ath10k_ce_disable_interrupts(ar);
ath10k_pci_free_irq(ar);
ath10k_pci_kill_tasklet(ar);
ath10k_pci_stop_ce(ar);
ath10k_pci_process_ce(ar);
err_free_compl:
ath10k_pci_cleanup_ce(ar);
err_early_irq:
/* Though there should be no interrupts (device was reset)
* power_down() expects the early IRQ to be installed as per the
@ -1413,18 +1199,10 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
&ce_data, &nbytes, &id) == 0) {
/*
* Indicate the completion to higher layer to free
* the buffer
*/
if (!netbuf) {
ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
ce_hdl->id);
/* no need to call tx completion for NULL pointers */
if (!netbuf)
continue;
}
ATH10K_SKB_CB(netbuf)->is_aborted = true;
ar_pci->msg_callbacks_current.tx_completion(ar,
netbuf,
id);
@ -1482,7 +1260,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_free_irq(ar);
ath10k_pci_kill_tasklet(ar);
ath10k_pci_stop_ce(ar);
ret = ath10k_pci_request_early_irq(ar);
if (ret)
@ -1492,8 +1269,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
* not DMA nor interrupt. We process the leftovers and then free
* everything else up. */
ath10k_pci_process_ce(ar);
ath10k_pci_cleanup_ce(ar);
ath10k_pci_buffer_cleanup(ar);
/* Make the sure the device won't access any structures on the host by
@ -2269,7 +2044,7 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
#endif
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
.send_head = ath10k_pci_hif_send_head,
.tx_sg = ath10k_pci_hif_tx_sg,
.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
.start = ath10k_pci_hif_start,
.stop = ath10k_pci_hif_stop,

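The scatter-gather send above queues the first n_items - 1 fragments with the gather flag and the last one without it, but only after checking that the whole batch fits in the CE source ring. A self-contained sketch of the underlying power-of-two ring arithmetic, with generic names rather than the driver's CE macros:

#include <stdbool.h>

/* Room check for a ring whose size is a power of two; mask is
 * nentries - 1. One slot stays unused so a full ring can be told
 * apart from an empty one. */
static bool ring_has_room(unsigned int mask, unsigned int write_index,
			  unsigned int sw_index, unsigned int n_items)
{
	unsigned int free_slots = (sw_index - write_index - 1) & mask;

	return free_slots >= n_items;
}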
View File

@ -43,23 +43,6 @@ struct bmi_xfer {
u32 resp_len;
};
enum ath10k_pci_compl_state {
ATH10K_PCI_COMPL_FREE = 0,
ATH10K_PCI_COMPL_SEND,
ATH10K_PCI_COMPL_RECV,
};
struct ath10k_pci_compl {
struct list_head list;
enum ath10k_pci_compl_state state;
struct ath10k_ce_pipe *ce_state;
struct ath10k_pci_pipe *pipe_info;
struct sk_buff *skb;
unsigned int nbytes;
unsigned int transfer_id;
unsigned int flags;
};
/*
* PCI-specific Target state
*
@ -175,9 +158,6 @@ struct ath10k_pci_pipe {
/* protects compl_free and num_send_allowed */
spinlock_t pipe_lock;
/* List of free CE completion slots */
struct list_head compl_free;
struct ath10k_pci *ar_pci;
struct tasklet_struct intr;
};
@ -205,14 +185,6 @@ struct ath10k_pci {
atomic_t keep_awake_count;
bool verified_awake;
/* List of CE completions to be processed */
struct list_head compl_process;
/* protects compl_processing and compl_process */
spinlock_t compl_lock;
bool compl_processing;
struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
struct ath10k_hif_cb msg_callbacks_current;

View File

@ -51,7 +51,8 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
struct ieee80211_tx_info *info;
struct ath10k_skb_cb *skb_cb;
struct sk_buff *msdu;
int ret;
lockdep_assert_held(&htt->tx_lock);
ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
@ -65,12 +66,12 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
msdu = htt->pending_tx[tx_done->msdu_id];
skb_cb = ATH10K_SKB_CB(msdu);
ret = ath10k_skb_unmap(dev, msdu);
if (ret)
ath10k_warn("data skb unmap failed (%d)\n", ret);
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (skb_cb->htt.frag_len)
skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
if (skb_cb->htt.txbuf)
dma_pool_free(htt->tx_pool,
skb_cb->htt.txbuf,
skb_cb->htt.txbuf_paddr);
ath10k_report_offchan_tx(htt->ar, msdu);
@ -92,13 +93,11 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
/* we do not own the msdu anymore */
exit:
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[tx_done->msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt);
if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
}
static const u8 rx_legacy_rate_idx[] = {
@ -258,6 +257,12 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
status->band = ch->band;
status->freq = ch->center_freq;
if (info->rate.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
/* TSF available only in 32-bit */
status->mactime = info->tsf & 0xffffffff;
status->flag |= RX_FLAG_MACTIME_END;
}
ath10k_dbg(ATH10K_DBG_DATA,
"rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i\n",
info->skb,
@ -378,7 +383,8 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, ev->peer_id);
if (!peer) {
ath10k_warn("unknown peer id %d\n", ev->peer_id);
ath10k_warn("peer-unmap-event: unknown peer id %d\n",
ev->peer_id);
goto exit;
}

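The tx-completion rework above releases the MSDU mapping with dma_unmap_single() and returns the per-packet HTT buffer to a DMA pool instead of routing through a generic unmap helper. A minimal sketch of that alloc/free pairing, with hypothetical wrapper names:

#include <linux/dmapool.h>

static void *txbuf_get(struct dma_pool *pool, dma_addr_t *paddr)
{
	/* GFP_ATOMIC: the HTT tx path runs in atomic context */
	return dma_pool_alloc(pool, GFP_ATOMIC, paddr);
}

static void txbuf_put(struct dma_pool *pool, void *vaddr, dma_addr_t paddr)
{
	if (vaddr)
		dma_pool_free(pool, vaddr, paddr);
}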
View File

@ -1360,7 +1360,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
struct wmi_bcn_info *bcn_info;
struct ath10k_vif *arvif;
struct sk_buff *bcn;
int vdev_id = 0;
int ret, vdev_id = 0;
ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
@ -1435,16 +1435,27 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_warn("SWBA overrun on vdev %d\n",
arvif->vdev_id);
ath10k_skb_unmap(ar->dev, arvif->beacon);
dma_unmap_single(arvif->ar->dev,
ATH10K_SKB_CB(arvif->beacon)->paddr,
arvif->beacon->len, DMA_TO_DEVICE);
dev_kfree_skb_any(arvif->beacon);
}
ath10k_skb_map(ar->dev, bcn);
ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
bcn->data, bcn->len,
DMA_TO_DEVICE);
ret = dma_mapping_error(arvif->ar->dev,
ATH10K_SKB_CB(bcn)->paddr);
if (ret) {
ath10k_warn("failed to map beacon: %d\n", ret);
goto skip;
}
arvif->beacon = bcn;
arvif->beacon_sent = false;
ath10k_wmi_tx_beacon_nowait(arvif);
skip:
spin_unlock_bh(&ar->data_lock);
}
}
@ -3382,7 +3393,6 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
ci->max_power = ch->max_power;
ci->reg_power = ch->max_reg_power;
ci->antenna_max = ch->max_antenna_gain;
ci->antenna_max = 0;
/* mode & flags share storage */
ci->mode = ch->mode;

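The beacon fix above comes down to map-and-check discipline: a dma_map_single() result is only usable after dma_mapping_error() has cleared it. Reduced to its essentials, as a sketch rather than the driver's exact code:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Map a beacon skb for device access and verify the mapping took. */
static int beacon_map(struct device *dev, struct sk_buff *bcn,
		      dma_addr_t *paddr)
{
	*paddr = dma_map_single(dev, bcn->data, bcn->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *paddr))
		return -EIO;

	return 0;
}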
View File

@ -751,6 +751,9 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(ah->dev, bf->skbaddr))
return -ENOSPC;
ieee80211_get_tx_rates(info->control.vif, (control) ? control->sta : NULL, skb, bf->rates,
ARRAY_SIZE(bf->rates));

View File

@ -52,7 +52,8 @@ obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
ath9k_common-y:= common.o \
common-init.o
common-init.o \
common-beacon.o
ath9k_htc-y += htc_hst.o \
hif_usb.o \

View File

@ -39,6 +39,10 @@ static const struct platform_device_id ath9k_platform_id_table[] = {
.name = "qca955x_wmac",
.driver_data = AR9300_DEVID_QCA955X,
},
{
.name = "qca953x_wmac",
.driver_data = AR9300_DEVID_AR953X,
},
{},
};
@ -82,6 +86,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
int irq;
int ret = 0;
struct ath_hw *ah;
struct ath_common *common;
char hw_name[64];
if (!dev_get_platdata(&pdev->dev)) {
@ -124,9 +129,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
sc->mem = mem;
sc->irq = irq;
/* Will be cleared in ath9k_start() */
set_bit(SC_OP_INVALID, &sc->sc_flags);
ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
@ -144,6 +146,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
hw_name, (unsigned long)mem, irq);
common = ath9k_hw_common(sc->sc_ah);
/* Will be cleared in ath9k_start() */
set_bit(ATH_OP_INVALID, &common->op_flags);
return 0;
err_irq:

View File

@ -318,17 +318,6 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
BUG_ON(aniState == NULL);
ah->stats.ast_ani_reset++;
/* only allow a subset of functions in AP mode */
if (ah->opmode == NL80211_IFTYPE_AP) {
if (IS_CHAN_2GHZ(chan)) {
ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
ATH9K_ANI_FIRSTEP_LEVEL);
if (AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function |= ATH9K_ANI_MRC_CCK;
} else
ah->ani_function = 0;
}
ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL,
aniState->ofdmNoiseImmunityLevel);
cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL,

View File

@ -26,10 +26,6 @@ static const int firstep_table[] =
/* level: 0 1 2 3 4 5 6 7 8 */
{ -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
static const int cycpwrThr1_table[] =
/* level: 0 1 2 3 4 5 6 7 8 */
{ -6, -4, -2, 0, 2, 4, 6, 8 }; /* lvl 0-7, default 3 */
/*
* register values to turn OFDM weak signal detection OFF
*/
@ -921,7 +917,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_channel *chan = ah->curchan;
struct ar5416AniState *aniState = &ah->ani;
s32 value, value2;
s32 value;
switch (cmd & ah->ani_function) {
case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
@ -1008,42 +1004,11 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
case ATH9K_ANI_FIRSTEP_LEVEL:{
u32 level = param;
if (level >= ARRAY_SIZE(firstep_table)) {
ath_dbg(common, ANI,
"ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(firstep_table));
return false;
}
/*
* make register setting relative to default
* from INI file & cap value
*/
value = firstep_table[level] -
firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
aniState->iniDef.firstep;
if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX)
value = ATH9K_SIG_FIRSTEP_SETTING_MAX;
value = level * 2;
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
AR_PHY_FIND_SIG_FIRSTEP,
value);
/*
* we need to set first step low register too
* make register setting relative to default
* from INI file & cap value
*/
value2 = firstep_table[level] -
firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
aniState->iniDef.firstepLow;
if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX)
value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX;
AR_PHY_FIND_SIG_FIRSTEP, value);
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
AR_PHY_FIND_SIG_FIRSTEP_LOW, value2);
AR_PHY_FIND_SIG_FIRSTEP_LOW, value);
if (level != aniState->firstepLevel) {
ath_dbg(common, ANI,
@ -1060,7 +1025,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
aniState->firstepLevel,
level,
ATH9K_ANI_FIRSTEP_LVL,
value2,
value,
aniState->iniDef.firstepLow);
if (level > aniState->firstepLevel)
ah->stats.ast_ani_stepup++;
@ -1073,41 +1038,13 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
ath_dbg(common, ANI,
"ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(cycpwrThr1_table));
return false;
}
/*
* make register setting relative to default
* from INI file & cap value
*/
value = cycpwrThr1_table[level] -
cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
aniState->iniDef.cycpwrThr1;
if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
value = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
value = (level + 1) * 2;
REG_RMW_FIELD(ah, AR_PHY_TIMING5,
AR_PHY_TIMING5_CYCPWR_THR1,
value);
AR_PHY_TIMING5_CYCPWR_THR1, value);
/*
* set AR_PHY_EXT_CCA for extension channel
* make register setting relative to default
* from INI file & cap value
*/
value2 = cycpwrThr1_table[level] -
cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
aniState->iniDef.cycpwrThr1Ext;
if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2);
if (IS_CHAN_HT40(ah->curchan))
REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
AR_PHY_EXT_TIMING5_CYCPWR_THR1, value);
if (level != aniState->spurImmunityLevel) {
ath_dbg(common, ANI,
@ -1124,7 +1061,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
aniState->spurImmunityLevel,
level,
ATH9K_ANI_SPUR_IMMUNE_LVL,
value2,
value,
aniState->iniDef.cycpwrThr1Ext);
if (level > aniState->spurImmunityLevel)
ah->stats.ast_ani_spurup++;

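For reference, the ANI simplification above replaces the INI-relative, clamped computation with absolute register values: FIRSTEP is programmed as value = 2 * level (0..16 for levels 0..8) and CYCPWR_THR1 as value = 2 * (level + 1) (2..16 for levels 0..7), and the same value is now written to the low/extension-channel registers instead of a separately derived one.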
View File

@ -23,8 +23,8 @@
#define COMP_HDR_LEN 4
#define COMP_CKSUM_LEN 2
#define LE16(x) __constant_cpu_to_le16(x)
#define LE32(x) __constant_cpu_to_le32(x)
#define LE16(x) cpu_to_le16(x)
#define LE32(x) cpu_to_le32(x)
/* Local defines to distinguish between extension and control CTL's */
#define EXT_ADDITIVE (0x8000)
@ -4792,43 +4792,54 @@ static void ar9003_hw_power_control_override(struct ath_hw *ah,
tempslope:
if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
u8 txmask = (eep->baseEepHeader.txrxMask & 0xf0) >> 4;
/*
* AR955x has tempSlope register for each chain.
* Check whether temp_compensation feature is enabled or not.
*/
if (eep->baseEepHeader.featureEnable & 0x1) {
if (frequency < 4000) {
REG_RMW_FIELD(ah, AR_PHY_TPC_19,
AR_PHY_TPC_19_ALPHA_THERM,
eep->base_ext2.tempSlopeLow);
REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
AR_PHY_TPC_19_ALPHA_THERM,
temp_slope);
REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
AR_PHY_TPC_19_ALPHA_THERM,
eep->base_ext2.tempSlopeHigh);
if (txmask & BIT(0))
REG_RMW_FIELD(ah, AR_PHY_TPC_19,
AR_PHY_TPC_19_ALPHA_THERM,
eep->base_ext2.tempSlopeLow);
if (txmask & BIT(1))
REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
AR_PHY_TPC_19_ALPHA_THERM,
temp_slope);
if (txmask & BIT(2))
REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
AR_PHY_TPC_19_ALPHA_THERM,
eep->base_ext2.tempSlopeHigh);
} else {
REG_RMW_FIELD(ah, AR_PHY_TPC_19,
AR_PHY_TPC_19_ALPHA_THERM,
temp_slope);
REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
AR_PHY_TPC_19_ALPHA_THERM,
temp_slope1);
REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
AR_PHY_TPC_19_ALPHA_THERM,
temp_slope2);
if (txmask & BIT(0))
REG_RMW_FIELD(ah, AR_PHY_TPC_19,
AR_PHY_TPC_19_ALPHA_THERM,
temp_slope);
if (txmask & BIT(1))
REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
AR_PHY_TPC_19_ALPHA_THERM,
temp_slope1);
if (txmask & BIT(2))
REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
AR_PHY_TPC_19_ALPHA_THERM,
temp_slope2);
}
} else {
/*
* If temp compensation is not enabled,
* set all registers to 0.
*/
REG_RMW_FIELD(ah, AR_PHY_TPC_19,
AR_PHY_TPC_19_ALPHA_THERM, 0);
REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
AR_PHY_TPC_19_ALPHA_THERM, 0);
REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
AR_PHY_TPC_19_ALPHA_THERM, 0);
if (txmask & BIT(0))
REG_RMW_FIELD(ah, AR_PHY_TPC_19,
AR_PHY_TPC_19_ALPHA_THERM, 0);
if (txmask & BIT(1))
REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
AR_PHY_TPC_19_ALPHA_THERM, 0);
if (txmask & BIT(2))
REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
AR_PHY_TPC_19_ALPHA_THERM, 0);
}
} else {
REG_RMW_FIELD(ah, AR_PHY_TPC_19,

View File

@ -403,20 +403,10 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
#define ATH_BCBUF 8
#define ATH_DEFAULT_BINTVAL 100 /* TU */
#define ATH_DEFAULT_BMISS_LIMIT 10
#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
#define TSF_TO_TU(_h,_l) \
((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
struct ath_beacon_config {
int beacon_interval;
u16 dtim_period;
u16 bmiss_timeout;
u8 dtim_count;
bool enable_beacon;
bool ibss_creator;
};
struct ath_beacon {
enum {
OK, /* no change needed */
@ -426,11 +416,9 @@ struct ath_beacon {
u32 beaconq;
u32 bmisscnt;
u32 bc_tstamp;
struct ieee80211_vif *bslot[ATH_BCBUF];
int slottime;
int slotupdate;
struct ath9k_tx_queue_info beacon_qi;
struct ath_descdma bdma;
struct ath_txq *cabq;
struct list_head bbuf;
@ -697,15 +685,6 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
#define MAX_GTT_CNT 5
enum sc_op_flags {
SC_OP_INVALID,
SC_OP_BEACONS,
SC_OP_ANI_RUN,
SC_OP_PRIM_STA_VIF,
SC_OP_HW_RESET,
SC_OP_SCANNING,
};
/* Powersave flags */
#define PS_WAIT_FOR_BEACON BIT(0)
#define PS_WAIT_FOR_CAB BIT(1)
@ -735,7 +714,6 @@ struct ath_softc {
struct completion paprd_complete;
wait_queue_head_t tx_wait;
unsigned long sc_flags;
unsigned long driver_data;
u8 gtt_cnt;

View File

@ -328,7 +328,7 @@ void ath9k_beacon_tasklet(unsigned long data)
bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
int slot;
if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) {
if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) {
ath_dbg(common, RESET,
"reset work is pending, skip beaconing now\n");
return;
@ -447,33 +447,6 @@ static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt,
ath9k_hw_enable_interrupts(ah);
}
/* Calculate the modulo of a 64 bit TSF snapshot with a TU divisor */
static u32 ath9k_mod_tsf64_tu(u64 tsf, u32 div_tu)
{
u32 tsf_mod, tsf_hi, tsf_lo, mod_hi, mod_lo;
tsf_mod = tsf & (BIT(10) - 1);
tsf_hi = tsf >> 32;
tsf_lo = ((u32) tsf) >> 10;
mod_hi = tsf_hi % div_tu;
mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;
return (mod_lo << 10) | tsf_mod;
}
static u32 ath9k_get_next_tbtt(struct ath_softc *sc, u64 tsf,
unsigned int interval)
{
struct ath_hw *ah = sc->sc_ah;
unsigned int offset;
tsf += TU_TO_USEC(FUDGE + ah->config.sw_beacon_response_time);
offset = ath9k_mod_tsf64_tu(tsf, interval);
return (u32) tsf + TU_TO_USEC(interval) - offset;
}
/*
* For multi-bss ap support beacons are either staggered evenly over N slots or
* burst together. For the former arrange for the SWBA to be delivered for each
@ -483,109 +456,18 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
struct ath_beacon_config *conf)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
u32 nexttbtt, intval;
/* NB: the beacon interval is kept internally in TU's */
intval = TU_TO_USEC(conf->beacon_interval);
intval /= ATH_BCBUF;
nexttbtt = ath9k_get_next_tbtt(sc, ath9k_hw_gettsf64(ah),
conf->beacon_interval);
if (conf->enable_beacon)
ah->imask |= ATH9K_INT_SWBA;
else
ah->imask &= ~ATH9K_INT_SWBA;
ath_dbg(common, BEACON,
"AP (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
(conf->enable_beacon) ? "Enable" : "Disable",
nexttbtt, intval, conf->beacon_interval);
ath9k_beacon_init(sc, nexttbtt, intval, false);
ath9k_cmn_beacon_config_ap(ah, conf, ATH_BCBUF);
ath9k_beacon_init(sc, conf->nexttbtt, conf->intval, false);
}
/*
* This sets up the beacon timers according to the timestamp of the last
* received beacon and the current TSF, configures PCF and DTIM
* handling, programs the sleep registers so the hardware will wakeup in
* time to receive beacons, and configures the beacon miss handling so
* we'll receive a BMISS interrupt when we stop seeing beacons from the AP
* we've associated with.
*/
static void ath9k_beacon_config_sta(struct ath_softc *sc,
static void ath9k_beacon_config_sta(struct ath_hw *ah,
struct ath_beacon_config *conf)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_beacon_state bs;
int dtim_intval;
u32 nexttbtt = 0, intval;
u64 tsf;
/* No need to configure beacon if we are not associated */
if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
ath_dbg(common, BEACON,
"STA is not yet associated..skipping beacon config\n");
if (ath9k_cmn_beacon_config_sta(ah, conf, &bs) == -EPERM)
return;
}
memset(&bs, 0, sizeof(bs));
intval = conf->beacon_interval;
/*
* Setup dtim parameters according to
* last beacon we received (which may be none).
*/
dtim_intval = intval * conf->dtim_period;
/*
* Pull nexttbtt forward to reflect the current
* TSF and calculate dtim state for the result.
*/
tsf = ath9k_hw_gettsf64(ah);
nexttbtt = ath9k_get_next_tbtt(sc, tsf, intval);
bs.bs_intval = TU_TO_USEC(intval);
bs.bs_dtimperiod = conf->dtim_period * bs.bs_intval;
bs.bs_nexttbtt = nexttbtt;
bs.bs_nextdtim = nexttbtt;
if (conf->dtim_period > 1)
bs.bs_nextdtim = ath9k_get_next_tbtt(sc, tsf, dtim_intval);
/*
* Calculate the number of consecutive beacons to miss before taking
* a BMISS interrupt. The configuration is specified in TU so we only
* need to calculate based on the beacon interval. Note that we clamp the
* result to at most 15 beacons.
*/
bs.bs_bmissthreshold = DIV_ROUND_UP(conf->bmiss_timeout, intval);
if (bs.bs_bmissthreshold > 15)
bs.bs_bmissthreshold = 15;
else if (bs.bs_bmissthreshold <= 0)
bs.bs_bmissthreshold = 1;
/*
* Calculate sleep duration. The configuration is given in ms.
* We ensure a multiple of the beacon period is used. Also, if the sleep
* duration is greater than the DTIM period then it makes sense
* to make it a multiple of that.
*
* XXX fixed at 100ms
*/
bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
intval));
if (bs.bs_sleepduration > bs.bs_dtimperiod)
bs.bs_sleepduration = bs.bs_dtimperiod;
/* TSF out of range threshold fixed at 1 second */
bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
ath_dbg(common, BEACON, "bmiss: %u sleep: %u\n",
bs.bs_bmissthreshold, bs.bs_sleepduration);
/* Set the computed STA beacon timers */
ath9k_hw_disable_interrupts(ah);
ath9k_hw_set_sta_beacon_timers(ah, &bs);
@ -600,36 +482,19 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
u32 intval, nexttbtt;
ath9k_reset_beacon_status(sc);
intval = TU_TO_USEC(conf->beacon_interval);
ath9k_cmn_beacon_config_adhoc(ah, conf);
if (conf->ibss_creator)
nexttbtt = intval;
else
nexttbtt = ath9k_get_next_tbtt(sc, ath9k_hw_gettsf64(ah),
conf->beacon_interval);
if (conf->enable_beacon)
ah->imask |= ATH9K_INT_SWBA;
else
ah->imask &= ~ATH9K_INT_SWBA;
ath_dbg(common, BEACON,
"IBSS (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
(conf->enable_beacon) ? "Enable" : "Disable",
nexttbtt, intval, conf->beacon_interval);
ath9k_beacon_init(sc, nexttbtt, intval, conf->ibss_creator);
ath9k_beacon_init(sc, conf->nexttbtt, conf->intval, conf->ibss_creator);
/*
* Set the global 'beacon has been configured' flag for the
* joiner case in IBSS mode.
*/
if (!conf->ibss_creator && conf->enable_beacon)
set_bit(SC_OP_BEACONS, &sc->sc_flags);
set_bit(ATH_OP_BEACONS, &common->op_flags);
}
static bool ath9k_allow_beacon_config(struct ath_softc *sc,
@ -649,7 +514,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
if ((vif->type == NL80211_IFTYPE_STATION) &&
test_bit(SC_OP_BEACONS, &sc->sc_flags) &&
test_bit(ATH_OP_BEACONS, &common->op_flags) &&
!avp->primary_sta_vif) {
ath_dbg(common, CONFIG,
"Beacon already configured for a station interface\n");
@ -700,6 +565,8 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
{
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
unsigned long flags;
bool skip_beacon = false;
@ -712,7 +579,7 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
ath9k_cache_beacon_config(sc, bss_conf);
ath9k_set_beacon(sc);
set_bit(SC_OP_BEACONS, &sc->sc_flags);
set_bit(ATH_OP_BEACONS, &common->op_flags);
return;
}
@ -751,13 +618,13 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
}
/*
* Do not set the SC_OP_BEACONS flag for IBSS joiner mode
* Do not set the ATH_OP_BEACONS flag for IBSS joiner mode
* here, it is done in ath9k_beacon_config_adhoc().
*/
if (cur_conf->enable_beacon && !skip_beacon)
set_bit(SC_OP_BEACONS, &sc->sc_flags);
set_bit(ATH_OP_BEACONS, &common->op_flags);
else
clear_bit(SC_OP_BEACONS, &sc->sc_flags);
clear_bit(ATH_OP_BEACONS, &common->op_flags);
}
}
@ -775,7 +642,7 @@ void ath9k_set_beacon(struct ath_softc *sc)
ath9k_beacon_config_adhoc(sc, cur_conf);
break;
case NL80211_IFTYPE_STATION:
ath9k_beacon_config_sta(sc, cur_conf);
ath9k_beacon_config_sta(sc->sc_ah, cur_conf);
break;
default:
ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");

View File

@ -0,0 +1,180 @@
/*
* Copyright (c) 2008-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "common.h"
#define FUDGE 2
/* Calculate the modulo of a 64 bit TSF snapshot with a TU divisor */
static u32 ath9k_mod_tsf64_tu(u64 tsf, u32 div_tu)
{
u32 tsf_mod, tsf_hi, tsf_lo, mod_hi, mod_lo;
tsf_mod = tsf & (BIT(10) - 1);
tsf_hi = tsf >> 32;
tsf_lo = ((u32) tsf) >> 10;
mod_hi = tsf_hi % div_tu;
mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;
return (mod_lo << 10) | tsf_mod;
}
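/*
 * A sketch of why the split above works, assuming div_tu <= 1024 so the
 * shifted term fits in a u32: with 1 TU = 1024us, tsf >> 10 is the TSF
 * in TU and decomposes as tsf_hi * 2^22 + tsf_lo (2^32 / 2^10 = 2^22).
 * Reducing tsf_hi modulo div_tu first keeps the arithmetic in 32 bits:
 *
 *   (tsf_hi * 2^22 + tsf_lo) % div_tu
 *       == ((tsf_hi % div_tu) * 2^22 + tsf_lo) % div_tu
 *
 * The sub-TU microseconds (tsf & 1023) pass through unchanged and are
 * stitched back on at the end.
 */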
static u32 ath9k_get_next_tbtt(struct ath_hw *ah, u64 tsf,
unsigned int interval)
{
unsigned int offset;
tsf += TU_TO_USEC(FUDGE + ah->config.sw_beacon_response_time);
offset = ath9k_mod_tsf64_tu(tsf, interval);
return (u32) tsf + TU_TO_USEC(interval) - offset;
}
/*
* This sets up the beacon timers according to the timestamp of the last
* received beacon and the current TSF, configures PCF and DTIM
* handling, programs the sleep registers so the hardware will wakeup in
* time to receive beacons, and configures the beacon miss handling so
* we'll receive a BMISS interrupt when we stop seeing beacons from the AP
* we've associated with.
*/
int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
struct ath_beacon_config *conf,
struct ath9k_beacon_state *bs)
{
struct ath_common *common = ath9k_hw_common(ah);
int dtim_intval;
u64 tsf;
/* No need to configure beacon if we are not associated */
if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
ath_dbg(common, BEACON,
"STA is not yet associated..skipping beacon config\n");
return -EPERM;
}
memset(bs, 0, sizeof(*bs));
conf->intval = conf->beacon_interval;
/*
* Setup dtim parameters according to
* last beacon we received (which may be none).
*/
dtim_intval = conf->intval * conf->dtim_period;
/*
* Pull nexttbtt forward to reflect the current
* TSF and calculate dtim state for the result.
*/
tsf = ath9k_hw_gettsf64(ah);
conf->nexttbtt = ath9k_get_next_tbtt(ah, tsf, conf->intval);
bs->bs_intval = TU_TO_USEC(conf->intval);
bs->bs_dtimperiod = conf->dtim_period * bs->bs_intval;
bs->bs_nexttbtt = conf->nexttbtt;
bs->bs_nextdtim = conf->nexttbtt;
if (conf->dtim_period > 1)
bs->bs_nextdtim = ath9k_get_next_tbtt(ah, tsf, dtim_intval);
/*
* Calculate the number of consecutive beacons to miss before taking
* a BMISS interrupt. The configuration is specified in TU so we only
* need to calculate based on the beacon interval. Note that we clamp the
* result to at most 15 beacons.
*/
bs->bs_bmissthreshold = DIV_ROUND_UP(conf->bmiss_timeout, conf->intval);
if (bs->bs_bmissthreshold > 15)
bs->bs_bmissthreshold = 15;
else if (bs->bs_bmissthreshold <= 0)
bs->bs_bmissthreshold = 1;
/*
* Calculate sleep duration. The configuration is given in ms.
* We ensure a multiple of the beacon period is used. Also, if the sleep
* duration is greater than the DTIM period then it makes sense
* to make it a multiple of that.
*
* XXX fixed at 100ms
*/
bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
conf->intval));
if (bs->bs_sleepduration > bs->bs_dtimperiod)
bs->bs_sleepduration = bs->bs_dtimperiod;
/* TSF out of range threshold fixed at 1 second */
bs->bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
ath_dbg(common, BEACON, "bmiss: %u sleep: %u\n",
bs->bs_bmissthreshold, bs->bs_sleepduration);
return 0;
}
EXPORT_SYMBOL(ath9k_cmn_beacon_config_sta);
void ath9k_cmn_beacon_config_adhoc(struct ath_hw *ah,
struct ath_beacon_config *conf)
{
struct ath_common *common = ath9k_hw_common(ah);
conf->intval = TU_TO_USEC(conf->beacon_interval);
if (conf->ibss_creator)
conf->nexttbtt = conf->intval;
else
conf->nexttbtt = ath9k_get_next_tbtt(ah, ath9k_hw_gettsf64(ah),
conf->beacon_interval);
if (conf->enable_beacon)
ah->imask |= ATH9K_INT_SWBA;
else
ah->imask &= ~ATH9K_INT_SWBA;
ath_dbg(common, BEACON,
"IBSS (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
(conf->enable_beacon) ? "Enable" : "Disable",
conf->nexttbtt, conf->intval, conf->beacon_interval);
}
EXPORT_SYMBOL(ath9k_cmn_beacon_config_adhoc);
/*
* For multi-bss ap support beacons are either staggered evenly over N slots or
* burst together. For the former arrange for the SWBA to be delivered for each
* slot. Slots that are not occupied will generate nothing.
*/
void ath9k_cmn_beacon_config_ap(struct ath_hw *ah,
struct ath_beacon_config *conf,
unsigned int bc_buf)
{
struct ath_common *common = ath9k_hw_common(ah);
/* NB: the beacon interval is kept internally in TU's */
conf->intval = TU_TO_USEC(conf->beacon_interval);
conf->intval /= bc_buf;
conf->nexttbtt = ath9k_get_next_tbtt(ah, ath9k_hw_gettsf64(ah),
conf->beacon_interval);
if (conf->enable_beacon)
ah->imask |= ATH9K_INT_SWBA;
else
ah->imask &= ~ATH9K_INT_SWBA;
ath_dbg(common, BEACON,
"AP (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
(conf->enable_beacon) ? "Enable" : "Disable",
conf->nexttbtt, conf->intval, conf->beacon_interval);
}
EXPORT_SYMBOL(ath9k_cmn_beacon_config_ap);

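Worked example for the AP path, with assumed numbers: for beacon_interval = 100 TU and bc_buf = ATH_BCBUF = 8, intval becomes TU_TO_USEC(100) / 8 = 102400 / 8 = 12800us, so an SWBA fires every 12800us (12.5 TU) and each of the eight beacon slots is serviced once per 100 TU.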
View File

@ -0,0 +1,26 @@
/*
* Copyright (c) 2009-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
struct ath_beacon_config;
int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
struct ath_beacon_config *conf,
struct ath9k_beacon_state *bs);
void ath9k_cmn_beacon_config_adhoc(struct ath_hw *ah,
struct ath_beacon_config *conf);
void ath9k_cmn_beacon_config_ap(struct ath_hw *ah,
struct ath_beacon_config *conf,
unsigned int bc_buf);

View File

@ -22,6 +22,7 @@
#include "hw-ops.h"
#include "common-init.h"
#include "common-beacon.h"
/* Common header for Atheros 802.11n base driver cores */
@ -44,6 +45,19 @@
#define ATH_EP_RND(x, mul) \
(((x) + ((mul)/2)) / (mul))
#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
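/* For example, IEEE80211_MS_TO_TU(100) == (100 * 1000) / 1024 == 97 TU;
 * 1 TU is 1024us and the integer division truncates. */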
struct ath_beacon_config {
int beacon_interval;
u16 dtim_period;
u16 bmiss_timeout;
u8 dtim_count;
bool enable_beacon;
bool ibss_creator;
u32 nexttbtt;
u32 intval;
};
bool ath9k_cmn_rx_accept(struct ath_common *common,
struct ieee80211_hdr *hdr,
struct ieee80211_rx_status *rxs,

View File

@ -139,43 +139,41 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
const unsigned int size = 1024;
ssize_t retval = 0;
char *buf;
int i;
struct {
const char *name;
unsigned int val;
} ani_info[] = {
{ "ANI RESET", ah->stats.ast_ani_reset },
{ "OFDM LEVEL", ah->ani.ofdmNoiseImmunityLevel },
{ "CCK LEVEL", ah->ani.cckNoiseImmunityLevel },
{ "SPUR UP", ah->stats.ast_ani_spurup },
{ "SPUR DOWN", ah->stats.ast_ani_spurup },
{ "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon },
{ "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff },
{ "MRC-CCK ON", ah->stats.ast_ani_ccklow },
{ "MRC-CCK OFF", ah->stats.ast_ani_cckhigh },
{ "FIR-STEP UP", ah->stats.ast_ani_stepup },
{ "FIR-STEP DOWN", ah->stats.ast_ani_stepdown },
{ "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero },
{ "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs },
{ "CCK ERRORS", ah->stats.ast_ani_cckerrs },
};
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
if (common->disable_ani) {
len += scnprintf(buf + len, size - len, "%s: %s\n",
"ANI", "DISABLED");
goto exit;
}
len += scnprintf(buf + len, size - len, "%15s: %s\n", "ANI",
common->disable_ani ? "DISABLED" : "ENABLED");
if (common->disable_ani)
goto exit;
for (i = 0; i < ARRAY_SIZE(ani_info); i++)
len += scnprintf(buf + len, size - len, "%15s: %u\n",
ani_info[i].name, ani_info[i].val);
len += scnprintf(buf + len, size - len, "%15s: %s\n",
"ANI", "ENABLED");
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"ANI RESET", ah->stats.ast_ani_reset);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"SPUR UP", ah->stats.ast_ani_spurup);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"SPUR DOWN", ah->stats.ast_ani_spurup);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"MRC-CCK ON", ah->stats.ast_ani_ccklow);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"FIR-STEP UP", ah->stats.ast_ani_stepup);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"CCK ERRORS", ah->stats.ast_ani_cckerrs);
exit:
if (len > size)
len = size;
@ -210,7 +208,7 @@ static ssize_t write_file_ani(struct file *file,
common->disable_ani = !ani;
if (common->disable_ani) {
clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
ath_stop_ani(sc);
} else {
ath_check_ani(sc);

View File

@ -39,7 +39,6 @@
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
#define ATH_DEFAULT_BMISS_LIMIT 10
#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
#define TSF_TO_TU(_h, _l) \
((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
@ -406,12 +405,18 @@ static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
#define DEFAULT_SWBA_RESPONSE 40 /* in TUs */
#define MIN_SWBA_RESPONSE 10 /* in TUs */
struct htc_beacon_config {
struct htc_beacon {
enum {
OK, /* no change needed */
UPDATE, /* update pending */
COMMIT /* beacon sent, commit change */
} updateslot; /* slot time update fsm */
struct ieee80211_vif *bslot[ATH9K_HTC_MAX_BCN_VIF];
u16 beacon_interval;
u16 dtim_period;
u16 bmiss_timeout;
u32 bmiss_cnt;
u32 bmisscnt;
u32 beaconq;
int slottime;
int slotupdate;
};
struct ath_btcoex {
@ -439,12 +444,8 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
#define OP_INVALID BIT(0)
#define OP_SCANNING BIT(1)
#define OP_ENABLE_BEACON BIT(2)
#define OP_BT_PRIORITY_DETECTED BIT(3)
#define OP_BT_SCAN BIT(4)
#define OP_ANI_RUNNING BIT(5)
#define OP_TSF_RESET BIT(6)
struct ath9k_htc_priv {
@ -489,7 +490,8 @@ struct ath9k_htc_priv {
struct ath9k_hw_cal_data caldata;
spinlock_t beacon_lock;
struct htc_beacon_config cur_beacon_conf;
struct ath_beacon_config cur_beacon_conf;
struct htc_beacon beacon;
struct ath9k_htc_rx rx;
struct ath9k_htc_tx tx;
@ -514,7 +516,6 @@ struct ath9k_htc_priv {
struct work_struct led_work;
#endif
int beaconq;
int cabq;
int hwq_map[IEEE80211_NUM_ACS];

View File

@ -26,7 +26,7 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
memset(&qi_be, 0, sizeof(struct ath9k_tx_queue_info));
ath9k_hw_get_txq_props(ah, priv->beaconq, &qi);
ath9k_hw_get_txq_props(ah, priv->beacon.beaconq, &qi);
if (priv->ah->opmode == NL80211_IFTYPE_AP ||
priv->ah->opmode == NL80211_IFTYPE_MESH_POINT) {
@ -54,105 +54,52 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
}
if (!ath9k_hw_set_txq_props(ah, priv->beaconq, &qi)) {
if (!ath9k_hw_set_txq_props(ah, priv->beacon.beaconq, &qi)) {
ath_err(ath9k_hw_common(ah),
"Unable to update beacon queue %u!\n", priv->beaconq);
"Unable to update beacon queue %u!\n", priv->beacon.beaconq);
} else {
ath9k_hw_resettxqueue(ah, priv->beaconq);
ath9k_hw_resettxqueue(ah, priv->beacon.beaconq);
}
}
/*
* Both nexttbtt and intval have to be in usecs.
*/
static void ath9k_htc_beacon_init(struct ath9k_htc_priv *priv,
struct ath_beacon_config *conf,
bool reset_tsf)
{
struct ath_hw *ah = priv->ah;
int ret __attribute__ ((unused));
__be32 htc_imask = 0;
u8 cmd_rsp;
if (conf->intval >= TU_TO_USEC(DEFAULT_SWBA_RESPONSE))
ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
else
ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
WMI_CMD(WMI_DISABLE_INTR_CMDID);
if (reset_tsf)
ath9k_hw_reset_tsf(ah);
ath9k_htc_beaconq_config(priv);
ath9k_hw_beaconinit(ah, conf->nexttbtt, conf->intval);
priv->beacon.bmisscnt = 0;
htc_imask = cpu_to_be32(ah->imask);
WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
}
static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
struct htc_beacon_config *bss_conf)
struct ath_beacon_config *bss_conf)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ath9k_beacon_state bs;
enum ath9k_int imask = 0;
int dtimperiod, dtimcount;
int bmiss_timeout;
u32 nexttbtt = 0, intval, tsftu;
__be32 htc_imask = 0;
u64 tsf;
int num_beacons, offset, dtim_dec_count;
int ret __attribute__ ((unused));
u8 cmd_rsp;
memset(&bs, 0, sizeof(bs));
intval = bss_conf->beacon_interval;
bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval);
/*
* Setup dtim parameters according to
* last beacon we received (which may be none).
*/
dtimperiod = bss_conf->dtim_period;
if (dtimperiod <= 0) /* NB: 0 if not known */
dtimperiod = 1;
dtimcount = 1;
if (dtimcount >= dtimperiod) /* NB: sanity check */
dtimcount = 0;
/*
* Pull nexttbtt forward to reflect the current
* TSF and calculate dtim state for the result.
*/
tsf = ath9k_hw_gettsf64(priv->ah);
tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
num_beacons = tsftu / intval + 1;
offset = tsftu % intval;
nexttbtt = tsftu - offset;
if (offset)
nexttbtt += intval;
/* DTIM Beacon every dtimperiod Beacon */
dtim_dec_count = num_beacons % dtimperiod;
dtimcount -= dtim_dec_count;
if (dtimcount < 0)
dtimcount += dtimperiod;
bs.bs_intval = TU_TO_USEC(intval);
bs.bs_nexttbtt = TU_TO_USEC(nexttbtt);
bs.bs_dtimperiod = dtimperiod * bs.bs_intval;
bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount * bs.bs_intval;
/*
* Calculate the number of consecutive beacons to miss before taking
* a BMISS interrupt. The configuration is specified in TU so we only
* need to calculate based on the beacon interval. Note that we clamp the
* result to at most 15 beacons.
*/
bs.bs_bmissthreshold = DIV_ROUND_UP(bmiss_timeout, intval);
if (bs.bs_bmissthreshold > 15)
bs.bs_bmissthreshold = 15;
else if (bs.bs_bmissthreshold <= 0)
bs.bs_bmissthreshold = 1;
/*
* Calculate sleep duration. The configuration is given in ms.
* We ensure a multiple of the beacon period is used. Also, if the sleep
* duration is greater than the DTIM period then it makes sense
* to make it a multiple of that.
*
* XXX fixed at 100ms
*/
bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
intval));
if (bs.bs_sleepduration > bs.bs_dtimperiod)
bs.bs_sleepduration = bs.bs_dtimperiod;
/* TSF out of range threshold fixed at 1 second */
bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
ath_dbg(common, CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
intval, tsf, tsftu);
ath_dbg(common, CONFIG, "bmiss: %u sleep: %u\n",
bs.bs_bmissthreshold, bs.bs_sleepduration);
/* Set the computed STA beacon timers */
if (ath9k_cmn_beacon_config_sta(priv->ah, bss_conf, &bs) == -EPERM)
return;
WMI_CMD(WMI_DISABLE_INTR_CMDID);
ath9k_hw_set_sta_beacon_timers(priv->ah, &bs);
@ -162,104 +109,23 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
}
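
The nexttbtt/DTIM pull-forward above is the trickiest part of the STA beacon math. The following standalone sketch (plain C with hypothetical values, not driver code) traces the same steps:

	/* Standalone illustration of the STA nexttbtt/DTIM arithmetic above.
	 * All values are hypothetical; FUDGE is already folded into tsftu. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int intval = 100;	/* beacon interval in TU (assumed) */
		unsigned int dtimperiod = 3;	/* DTIM every 3rd beacon (assumed) */
		unsigned int tsftu = 12345;	/* current TSF in TU */
		unsigned int num_beacons = tsftu / intval + 1;
		unsigned int offset = tsftu % intval;
		unsigned int nexttbtt = tsftu - offset;
		int dtimcount = 1;

		if (offset)
			nexttbtt += intval;
		dtimcount -= num_beacons % dtimperiod;	/* dtim_dec_count */
		if (dtimcount < 0)
			dtimcount += dtimperiod;
		/* prints: nexttbtt=12400 dtimcount=0 */
		printf("nexttbtt=%u dtimcount=%d\n", nexttbtt, dtimcount);
		return 0;
	}

With tsftu = 12345 and intval = 100, the TBTT is pulled forward to 12400 and the next DTIM coincides with it, matching what the driver programs into bs.bs_nexttbtt and bs.bs_nextdtim.
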
static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
struct htc_beacon_config *bss_conf)
struct ath_beacon_config *conf)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
enum ath9k_int imask = 0;
u32 nexttbtt, intval, tsftu;
__be32 htc_imask = 0;
int ret __attribute__ ((unused));
u8 cmd_rsp;
u64 tsf;
struct ath_hw *ah = priv->ah;
ah->imask = 0;
intval = bss_conf->beacon_interval;
intval /= ATH9K_HTC_MAX_BCN_VIF;
nexttbtt = intval;
/*
* To reduce beacon misses under heavy TX load,
* set the beacon response time to a larger value.
*/
if (intval > DEFAULT_SWBA_RESPONSE)
priv->ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
else
priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
if (test_bit(OP_TSF_RESET, &priv->op_flags)) {
ath9k_hw_reset_tsf(priv->ah);
clear_bit(OP_TSF_RESET, &priv->op_flags);
} else {
/*
* Pull nexttbtt forward to reflect the current TSF.
*/
tsf = ath9k_hw_gettsf64(priv->ah);
tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
do {
nexttbtt += intval;
} while (nexttbtt < tsftu);
}
if (test_bit(OP_ENABLE_BEACON, &priv->op_flags))
imask |= ATH9K_INT_SWBA;
ath_dbg(common, CONFIG,
"AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d imask: 0x%x\n",
bss_conf->beacon_interval, nexttbtt,
priv->ah->config.sw_beacon_response_time, imask);
ath9k_htc_beaconq_config(priv);
WMI_CMD(WMI_DISABLE_INTR_CMDID);
ath9k_hw_beaconinit(priv->ah, TU_TO_USEC(nexttbtt), TU_TO_USEC(intval));
priv->cur_beacon_conf.bmiss_cnt = 0;
htc_imask = cpu_to_be32(imask);
WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
ath9k_cmn_beacon_config_ap(ah, conf, ATH9K_HTC_MAX_BCN_VIF);
ath9k_htc_beacon_init(priv, conf, false);
}
static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
struct htc_beacon_config *bss_conf)
struct ath_beacon_config *conf)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
enum ath9k_int imask = 0;
u32 nexttbtt, intval, tsftu;
__be32 htc_imask = 0;
int ret __attribute__ ((unused));
u8 cmd_rsp;
u64 tsf;
struct ath_hw *ah = priv->ah;
ah->imask = 0;
intval = bss_conf->beacon_interval;
nexttbtt = intval;
/*
* Pull nexttbtt forward to reflect the current TSF.
*/
tsf = ath9k_hw_gettsf64(priv->ah);
tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
do {
nexttbtt += intval;
} while (nexttbtt < tsftu);
/*
* Only one IBSS interface is allowed.
*/
if (intval > DEFAULT_SWBA_RESPONSE)
priv->ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
else
priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
if (test_bit(OP_ENABLE_BEACON, &priv->op_flags))
imask |= ATH9K_INT_SWBA;
ath_dbg(common, CONFIG,
"IBSS Beacon config, intval: %d, nexttbtt: %u, resp_time: %d, imask: 0x%x\n",
bss_conf->beacon_interval, nexttbtt,
priv->ah->config.sw_beacon_response_time, imask);
WMI_CMD(WMI_DISABLE_INTR_CMDID);
ath9k_hw_beaconinit(priv->ah, TU_TO_USEC(nexttbtt), TU_TO_USEC(intval));
priv->cur_beacon_conf.bmiss_cnt = 0;
htc_imask = cpu_to_be32(imask);
WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
ath9k_cmn_beacon_config_adhoc(ah, conf);
ath9k_htc_beacon_init(priv, conf, conf->ibss_creator);
}
void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
@ -279,7 +145,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
spin_lock_bh(&priv->beacon_lock);
vif = priv->cur_beacon_conf.bslot[slot];
vif = priv->beacon.bslot[slot];
skb = ieee80211_get_buffered_bc(priv->hw, vif);
@ -340,10 +206,10 @@ static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv,
spin_lock_bh(&priv->beacon_lock);
vif = priv->cur_beacon_conf.bslot[slot];
vif = priv->beacon.bslot[slot];
avp = (struct ath9k_htc_vif *)vif->drv_priv;
if (unlikely(test_bit(OP_SCANNING, &priv->op_flags))) {
if (unlikely(test_bit(ATH_OP_SCANNING, &common->op_flags))) {
spin_unlock_bh(&priv->beacon_lock);
return;
}
@ -423,8 +289,8 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
int slot;
if (swba->beacon_pending != 0) {
priv->cur_beacon_conf.bmiss_cnt++;
if (priv->cur_beacon_conf.bmiss_cnt > BSTUCK_THRESHOLD) {
priv->beacon.bmisscnt++;
if (priv->beacon.bmisscnt > BSTUCK_THRESHOLD) {
ath_dbg(common, BSTUCK, "Beacon stuck, HW reset\n");
ieee80211_queue_work(priv->hw,
&priv->fatal_work);
@ -432,16 +298,16 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
return;
}
if (priv->cur_beacon_conf.bmiss_cnt) {
if (priv->beacon.bmisscnt) {
ath_dbg(common, BSTUCK,
"Resuming beacon xmit after %u misses\n",
priv->cur_beacon_conf.bmiss_cnt);
priv->cur_beacon_conf.bmiss_cnt = 0;
priv->beacon.bmisscnt);
priv->beacon.bmisscnt = 0;
}
slot = ath9k_htc_choose_bslot(priv, swba);
spin_lock_bh(&priv->beacon_lock);
if (priv->cur_beacon_conf.bslot[slot] == NULL) {
if (priv->beacon.bslot[slot] == NULL) {
spin_unlock_bh(&priv->beacon_lock);
return;
}
@ -460,13 +326,13 @@ void ath9k_htc_assign_bslot(struct ath9k_htc_priv *priv,
spin_lock_bh(&priv->beacon_lock);
for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++) {
if (priv->cur_beacon_conf.bslot[i] == NULL) {
if (priv->beacon.bslot[i] == NULL) {
avp->bslot = i;
break;
}
}
priv->cur_beacon_conf.bslot[avp->bslot] = vif;
priv->beacon.bslot[avp->bslot] = vif;
spin_unlock_bh(&priv->beacon_lock);
ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n",
@ -480,7 +346,7 @@ void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv,
struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv;
spin_lock_bh(&priv->beacon_lock);
priv->cur_beacon_conf.bslot[avp->bslot] = NULL;
priv->beacon.bslot[avp->bslot] = NULL;
spin_unlock_bh(&priv->beacon_lock);
ath_dbg(common, CONFIG, "Removed interface at beacon slot: %d\n",
@ -496,7 +362,7 @@ void ath9k_htc_set_tsfadjust(struct ath9k_htc_priv *priv,
{
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv;
struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
u64 tsfadjust;
if (avp->bslot == 0)
@ -528,7 +394,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
bool beacon_configured;
@ -583,7 +449,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
@ -619,7 +485,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
switch (priv->ah->opmode) {
case NL80211_IFTYPE_STATION:

View File

@ -405,8 +405,8 @@ static int ath9k_init_queues(struct ath9k_htc_priv *priv)
for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++)
priv->hwq_map[i] = -1;
priv->beaconq = ath9k_hw_beaconq_setup(priv->ah);
if (priv->beaconq == -1) {
priv->beacon.beaconq = ath9k_hw_beaconq_setup(priv->ah);
if (priv->beacon.beaconq == -1) {
ath_err(common, "Unable to setup BEACON xmit queue\n");
goto err;
}
@ -459,8 +459,6 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
struct ath_common *common;
int i, ret = 0, csz = 0;
set_bit(OP_INVALID, &priv->op_flags);
ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
if (!ah)
return -ENOMEM;
@ -485,6 +483,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
common->priv = priv;
common->debug_mask = ath9k_debug;
common->btcoex_enabled = ath9k_htc_btcoex_enable == 1;
set_bit(ATH_OP_INVALID, &common->op_flags);
spin_lock_init(&priv->beacon_lock);
spin_lock_init(&priv->tx.tx_lock);
@ -520,7 +519,8 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
goto err_queues;
for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++)
priv->cur_beacon_conf.bslot[i] = NULL;
priv->beacon.bslot[i] = NULL;
priv->beacon.slottime = ATH9K_SLOT_TIME_9;
ath9k_cmn_init_channels_rates(common);
ath9k_cmn_init_crypto(ah);

View File

@ -250,7 +250,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
u8 cmd_rsp;
int ret;
if (test_bit(OP_INVALID, &priv->op_flags))
if (test_bit(ATH_OP_INVALID, &common->op_flags))
return -EIO;
fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
@ -304,7 +304,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
htc_start(priv->htc);
if (!test_bit(OP_SCANNING, &priv->op_flags) &&
if (!test_bit(ATH_OP_SCANNING, &common->op_flags) &&
!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
ath9k_htc_vif_reconfig(priv);
@ -748,7 +748,7 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
common->ani.shortcal_timer = timestamp;
common->ani.checkani_timer = timestamp;
set_bit(OP_ANI_RUNNING, &priv->op_flags);
set_bit(ATH_OP_ANI_RUN, &common->op_flags);
ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
@ -756,8 +756,9 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
cancel_delayed_work_sync(&priv->ani_work);
clear_bit(OP_ANI_RUNNING, &priv->op_flags);
clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
}
void ath9k_htc_ani_work(struct work_struct *work)
@ -942,7 +943,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
ath_dbg(common, CONFIG,
"Failed to update capability in target\n");
clear_bit(OP_INVALID, &priv->op_flags);
clear_bit(ATH_OP_INVALID, &common->op_flags);
htc_start(priv->htc);
spin_lock_bh(&priv->tx.tx_lock);
@ -971,7 +972,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
if (test_bit(OP_INVALID, &priv->op_flags)) {
if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&priv->mutex);
return;
@ -1013,7 +1014,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
ath9k_htc_ps_restore(priv);
ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
set_bit(OP_INVALID, &priv->op_flags);
set_bit(ATH_OP_INVALID, &common->op_flags);
ath_dbg(common, CONFIG, "Driver halt\n");
mutex_unlock(&priv->mutex);
@ -1087,7 +1088,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
ath9k_htc_set_opmode(priv);
if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
!test_bit(OP_ANI_RUNNING, &priv->op_flags)) {
!test_bit(ATH_OP_ANI_RUN, &common->op_flags)) {
ath9k_hw_set_tsfadjust(priv->ah, true);
ath9k_htc_start_ani(priv);
}
@ -1245,13 +1246,14 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath_common *common = ath9k_hw_common(priv->ah);
u32 rfilt;
mutex_lock(&priv->mutex);
changed_flags &= SUPPORTED_FILTERS;
*total_flags &= SUPPORTED_FILTERS;
if (test_bit(OP_INVALID, &priv->op_flags)) {
if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_dbg(ath9k_hw_common(priv->ah), ANY,
"Unable to configure filter on invalid state\n");
mutex_unlock(&priv->mutex);
@ -1476,6 +1478,7 @@ static void ath9k_htc_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
common->curaid = bss_conf->aid;
common->last_rssi = ATH_RSSI_DUMMY_MARKER;
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
}
}
@ -1497,6 +1500,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
struct ath9k_htc_priv *priv = hw->priv;
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
int slottime;
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
@ -1508,6 +1512,9 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->assoc ?
priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--;
if (!bss_conf->assoc)
clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
ath9k_htc_choose_set_bssid(priv);
if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1))
@ -1529,7 +1536,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n",
bss_conf->bssid);
ath9k_htc_set_tsfadjust(priv, vif);
set_bit(OP_ENABLE_BEACON, &priv->op_flags);
priv->cur_beacon_conf.enable_beacon = 1;
ath9k_htc_beacon_config(priv, vif);
}
@ -1543,7 +1550,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
ath_dbg(common, CONFIG,
"Beacon disabled for BSS: %pM\n",
bss_conf->bssid);
clear_bit(OP_ENABLE_BEACON, &priv->op_flags);
priv->cur_beacon_conf.enable_beacon = 0;
ath9k_htc_beacon_config(priv, vif);
}
}
@ -1569,11 +1576,21 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ERP_SLOT) {
if (bss_conf->use_short_slot)
ah->slottime = 9;
slottime = 9;
else
ah->slottime = 20;
ath9k_hw_init_global_settings(ah);
slottime = 20;
if (vif->type == NL80211_IFTYPE_AP) {
/*
* Defer update, so that connected stations can adjust
* their settings at the same time.
* See beacon.c for more details
*/
priv->beacon.slottime = slottime;
priv->beacon.updateslot = UPDATE;
} else {
ah->slottime = slottime;
ath9k_hw_init_global_settings(ah);
}
}
if (changed & BSS_CHANGED_HT)
@ -1670,10 +1687,11 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath_common *common = ath9k_hw_common(priv->ah);
mutex_lock(&priv->mutex);
spin_lock_bh(&priv->beacon_lock);
set_bit(OP_SCANNING, &priv->op_flags);
set_bit(ATH_OP_SCANNING, &common->op_flags);
spin_unlock_bh(&priv->beacon_lock);
cancel_work_sync(&priv->ps_work);
ath9k_htc_stop_ani(priv);
@ -1683,10 +1701,11 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath_common *common = ath9k_hw_common(priv->ah);
mutex_lock(&priv->mutex);
spin_lock_bh(&priv->beacon_lock);
clear_bit(OP_SCANNING, &priv->op_flags);
clear_bit(ATH_OP_SCANNING, &common->op_flags);
spin_unlock_bh(&priv->beacon_lock);
ath9k_htc_ps_wakeup(priv);
ath9k_htc_vif_reconfig(priv);

View File

@ -924,9 +924,10 @@ static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
ath9k_hw_rxena(priv->ah);
ath9k_htc_opmode_init(priv);
ath9k_hw_startpcureceive(priv->ah, test_bit(OP_SCANNING, &priv->op_flags));
ath9k_hw_startpcureceive(priv->ah, test_bit(ATH_OP_SCANNING, &common->op_flags));
}
static inline void convert_htc_flag(struct ath_rx_status *rx_stats,

View File

@ -882,7 +882,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
AR_IMR_RXORN |
AR_IMR_BCNMISC;
if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
if (AR_SREV_9300_20_OR_LATER(ah)) {
@ -3048,6 +3048,7 @@ static struct {
{ AR_SREV_VERSION_9462, "9462" },
{ AR_SREV_VERSION_9550, "9550" },
{ AR_SREV_VERSION_9565, "9565" },
{ AR_SREV_VERSION_9531, "9531" },
};
/* For devices with external radios */

View File

@ -115,13 +115,14 @@ void ath_hw_pll_work(struct work_struct *work)
u32 pll_sqsum;
struct ath_softc *sc = container_of(work, struct ath_softc,
hw_pll_work.work);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
/*
* ensure that the PLL WAR is executed only
* after the STA is associated (or) if the
* beaconing has started on interfaces that
* use beacons.
*/
if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
if (!test_bit(ATH_OP_BEACONS, &common->op_flags))
return;
if (sc->tx99_state)
@ -414,7 +415,7 @@ void ath_start_ani(struct ath_softc *sc)
unsigned long timestamp = jiffies_to_msecs(jiffies);
if (common->disable_ani ||
!test_bit(SC_OP_ANI_RUN, &sc->sc_flags) ||
!test_bit(ATH_OP_ANI_RUN, &common->op_flags) ||
(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
return;
@ -438,6 +439,7 @@ void ath_stop_ani(struct ath_softc *sc)
void ath_check_ani(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
/*
@ -453,23 +455,23 @@ void ath_check_ani(struct ath_softc *sc)
* Disable ANI only when there are no
* associated stations.
*/
if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
goto stop_ani;
}
} else if (ah->opmode == NL80211_IFTYPE_STATION) {
if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
goto stop_ani;
}
if (!test_bit(SC_OP_ANI_RUN, &sc->sc_flags)) {
set_bit(SC_OP_ANI_RUN, &sc->sc_flags);
if (!test_bit(ATH_OP_ANI_RUN, &common->op_flags)) {
set_bit(ATH_OP_ANI_RUN, &common->op_flags);
ath_start_ani(sc);
}
return;
stop_ani:
clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
ath_stop_ani(sc);
}

View File

@ -827,7 +827,7 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
return;
}
if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
async_mask = AR_INTR_MAC_IRQ;

View File

@ -229,16 +229,16 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
ath9k_cmn_update_txpow(ah, sc->curtxpow,
sc->config.txpowlimit, &sc->curtxpow);
clear_bit(SC_OP_HW_RESET, &sc->sc_flags);
clear_bit(ATH_OP_HW_RESET, &common->op_flags);
ath9k_hw_set_interrupts(ah);
ath9k_hw_enable_interrupts(ah);
if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) && start) {
if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
if (!test_bit(ATH_OP_BEACONS, &common->op_flags))
goto work;
if (ah->opmode == NL80211_IFTYPE_STATION &&
test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
spin_lock_irqsave(&sc->sc_pm_lock, flags);
sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
@ -336,7 +336,7 @@ static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chand
int old_pos = -1;
int r;
if (test_bit(SC_OP_INVALID, &sc->sc_flags))
if (test_bit(ATH_OP_INVALID, &common->op_flags))
return -EIO;
offchannel = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
@ -402,7 +402,7 @@ static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chand
chan->center_freq);
} else {
/* perform spectral scan if requested. */
if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
if (test_bit(ATH_OP_SCANNING, &common->op_flags) &&
sc->spectral_mode == SPECTRAL_CHANSCAN)
ath9k_spectral_scan_trigger(hw);
}
@ -566,6 +566,7 @@ irqreturn_t ath_isr(int irq, void *dev)
struct ath_softc *sc = dev;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
enum ath9k_int status;
u32 sync_cause = 0;
bool sched = false;
@ -575,7 +576,7 @@ irqreturn_t ath_isr(int irq, void *dev)
* touch anything. Note this can happen early
* on if the IRQ is shared.
*/
if (test_bit(SC_OP_INVALID, &sc->sc_flags))
if (test_bit(ATH_OP_INVALID, &common->op_flags))
return IRQ_NONE;
/* shared irq, not for us */
@ -583,7 +584,7 @@ irqreturn_t ath_isr(int irq, void *dev)
if (!ath9k_hw_intrpend(ah))
return IRQ_NONE;
if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) {
if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) {
ath9k_hw_kill_interrupts(ah);
return IRQ_HANDLED;
}
@ -684,10 +685,11 @@ int ath_reset(struct ath_softc *sc)
void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
#ifdef CONFIG_ATH9K_DEBUGFS
RESET_STAT_INC(sc, type);
#endif
set_bit(SC_OP_HW_RESET, &sc->sc_flags);
set_bit(ATH_OP_HW_RESET, &common->op_flags);
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}
@ -768,7 +770,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
ath_mci_enable(sc);
clear_bit(SC_OP_INVALID, &sc->sc_flags);
clear_bit(ATH_OP_INVALID, &common->op_flags);
sc->sc_ah->is_monitoring = false;
if (!ath_complete_reset(sc, false))
@ -885,7 +887,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath_cancel_work(sc);
if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
@ -940,7 +942,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath9k_ps_restore(sc);
set_bit(SC_OP_INVALID, &sc->sc_flags);
set_bit(ATH_OP_INVALID, &common->op_flags);
sc->ps_idle = prev_idle;
mutex_unlock(&sc->mutex);
@ -1081,7 +1083,7 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
*/
if (ah->opmode == NL80211_IFTYPE_STATION &&
old_opmode == NL80211_IFTYPE_AP &&
test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
ieee80211_iterate_active_interfaces_atomic(
sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
ath9k_sta_vif_iter, sc);
@ -1590,7 +1592,7 @@ static void ath9k_set_assoc_state(struct ath_softc *sc,
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
unsigned long flags;
set_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
avp->primary_sta_vif = true;
/*
@ -1625,8 +1627,9 @@ static void ath9k_bss_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
struct ath_softc *sc = data;
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
if (test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
if (test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
return;
if (bss_conf->assoc)
@ -1657,18 +1660,18 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->bssid, bss_conf->assoc);
if (avp->primary_sta_vif && !bss_conf->assoc) {
clear_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
avp->primary_sta_vif = false;
if (ah->opmode == NL80211_IFTYPE_STATION)
clear_bit(SC_OP_BEACONS, &sc->sc_flags);
clear_bit(ATH_OP_BEACONS, &common->op_flags);
}
ieee80211_iterate_active_interfaces_atomic(
sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
ath9k_bss_assoc_iter, sc);
if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags) &&
if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags) &&
ah->opmode == NL80211_IFTYPE_STATION) {
memset(common->curbssid, 0, ETH_ALEN);
common->curaid = 0;
@ -1897,7 +1900,7 @@ static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
return;
}
if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
@ -2070,13 +2073,15 @@ static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
{
struct ath_softc *sc = hw->priv;
set_bit(SC_OP_SCANNING, &sc->sc_flags);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
set_bit(ATH_OP_SCANNING, &common->op_flags);
}
static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
{
struct ath_softc *sc = hw->priv;
clear_bit(SC_OP_SCANNING, &sc->sc_flags);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
clear_bit(ATH_OP_SCANNING, &common->op_flags);
}
static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,

View File

@ -555,7 +555,7 @@ void ath_mci_intr(struct ath_softc *sc)
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
while (more_data == MCI_GPM_MORE) {
if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
return;
pgpm = mci->gpm_buf.bf_addr;

View File

@ -784,6 +784,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ath_softc *sc;
struct ieee80211_hw *hw;
struct ath_common *common;
u8 csz;
u32 val;
int ret = 0;
@ -858,9 +859,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc->mem = pcim_iomap_table(pdev)[0];
sc->driver_data = id->driver_data;
/* Will be cleared in ath9k_start() */
set_bit(SC_OP_INVALID, &sc->sc_flags);
ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
@ -879,6 +877,10 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
hw_name, (unsigned long)sc->mem, pdev->irq);
/* Will be cleared in ath9k_start() */
common = ath9k_hw_common(sc->sc_ah);
set_bit(ATH_OP_INVALID, &common->op_flags);
return 0;
err_init:

View File

@ -108,7 +108,7 @@ static int ath9k_tx99_init(struct ath_softc *sc)
struct ath_tx_control txctl;
int r;
if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_err(common,
"driver is in invalid state unable to use TX99");
return -EINVAL;

View File

@ -198,7 +198,7 @@ int ath9k_suspend(struct ieee80211_hw *hw,
ath_cancel_work(sc);
ath_stop_ani(sc);
if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_dbg(common, ANY, "Device not present\n");
ret = -EINVAL;
goto fail_wow;
@ -224,7 +224,7 @@ int ath9k_suspend(struct ieee80211_hw *hw,
* STA.
*/
if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
ath_dbg(common, WOW, "None of the STA vifs are associated\n");
ret = 1;
goto fail_wow;

View File

@ -1699,7 +1699,7 @@ int ath_cabq_update(struct ath_softc *sc)
ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
qi.tqi_readyTime = (cur_conf->beacon_interval *
qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) *
ATH_CABQ_READY_TIME) / 100;
ath_txq_update(sc, qnum, &qi);
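
This hunk fixes a units bug: cur_conf->beacon_interval is in TU and must be converted to microseconds before the CABQ percentage is taken. A quick standalone check of the arithmetic (assuming TU_TO_USEC(x) == (x) << 10 and ATH_CABQ_READY_TIME == 80, i.e. 80% of the beacon interval, as in ath9k):

	/* Quick check of the CABQ ready-time arithmetic above.
	 * TU_TO_USEC() and ATH_CABQ_READY_TIME mirror ath9k's definitions. */
	#include <assert.h>
	#include <stdio.h>

	#define TU_TO_USEC(x)       ((x) << 10)
	#define ATH_CABQ_READY_TIME 80	/* % of beacon interval */

	int main(void)
	{
		unsigned int beacon_interval = 100;	/* TU (hypothetical) */
		unsigned int ready =
			(TU_TO_USEC(beacon_interval) * ATH_CABQ_READY_TIME) / 100;

		assert(ready == 81920);	/* 100 TU = 102400 us; 80% = 81920 us */
		printf("tqi_readyTime = %u us\n", ready);
		return 0;
	}

Without the conversion, the old code handed the hardware a ready time of 80 (microseconds) instead of 81920 for a 100 TU interval.
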
@ -1769,7 +1769,7 @@ bool ath_drain_all_txq(struct ath_softc *sc)
int i;
u32 npend = 0;
if (test_bit(SC_OP_INVALID, &sc->sc_flags))
if (test_bit(ATH_OP_INVALID, &common->op_flags))
return true;
ath9k_hw_abort_tx_dma(ah);
@ -1817,11 +1817,12 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
*/
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_atx_ac *ac, *last_ac;
struct ath_atx_tid *tid, *last_tid;
bool sent = false;
if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
if (test_bit(ATH_OP_HW_RESET, &common->op_flags) ||
list_empty(&txq->axq_acq))
return;
@ -2471,7 +2472,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
ath_txq_lock(sc, txq);
for (;;) {
if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
break;
if (list_empty(&txq->axq_q)) {
@ -2554,7 +2555,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
int status;
for (;;) {
if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
break;
status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);

View File

@ -179,7 +179,7 @@ static int wil_cfg80211_get_station(struct wiphy *wiphy,
int cid = wil_find_cid(wil, mac);
wil_info(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
if (cid < 0)
return cid;
@ -218,7 +218,7 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy,
return -ENOENT;
memcpy(mac, wil->sta[cid].addr, ETH_ALEN);
wil_info(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
rc = wil_cid_fill_sinfo(wil, cid, sinfo);
@ -265,6 +265,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
u16 chnl[4];
} __packed cmd;
uint i, n;
int rc;
if (wil->scan_request) {
wil_err(wil, "Already scanning\n");
@ -282,7 +283,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
/* FW doesn't support scan after connection attempt */
if (test_bit(wil_status_dontscan, &wil->status)) {
wil_err(wil, "Scan after connect attempt not supported\n");
wil_err(wil, "Can't scan now\n");
return -EBUSY;
}
@ -305,8 +306,13 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
request->channels[i]->center_freq);
}
return wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
if (rc)
wil->scan_request = NULL;
return rc;
}
static int wil_cfg80211_connect(struct wiphy *wiphy,
@ -321,6 +327,10 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
int ch;
int rc = 0;
if (test_bit(wil_status_fwconnecting, &wil->status) ||
test_bit(wil_status_fwconnected, &wil->status))
return -EALREADY;
bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
sme->ssid, sme->ssid_len,
WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
@ -402,10 +412,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
memcpy(conn.bssid, bss->bssid, ETH_ALEN);
memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
/*
* FW don't support scan after connection attempt
*/
set_bit(wil_status_dontscan, &wil->status);
set_bit(wil_status_fwconnecting, &wil->status);
rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
@ -414,7 +421,6 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
mod_timer(&wil->connect_timer,
jiffies + msecs_to_jiffies(2000));
} else {
clear_bit(wil_status_dontscan, &wil->status);
clear_bit(wil_status_fwconnecting, &wil->status);
}
@ -603,18 +609,20 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
if (wil_fix_bcon(wil, bcon))
wil_dbg_misc(wil, "Fixed bcon\n");
mutex_lock(&wil->mutex);
rc = wil_reset(wil);
if (rc)
return rc;
goto out;
/* Rx VRING. */
rc = wil_rx_init(wil);
if (rc)
return rc;
goto out;
rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
if (rc)
return rc;
goto out;
/* MAC address - pre-requisite for other commands */
wmi_set_mac_address(wil, ndev->dev_addr);
@ -638,11 +646,13 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
channel->hw_value);
if (rc)
return rc;
goto out;
netif_carrier_on(ndev);
out:
mutex_unlock(&wil->mutex);
return rc;
}
@ -652,8 +662,11 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
int rc = 0;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
mutex_lock(&wil->mutex);
rc = wmi_pcp_stop(wil);
mutex_unlock(&wil->mutex);
return rc;
}
@ -661,7 +674,11 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy,
struct net_device *dev, u8 *mac)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
mutex_lock(&wil->mutex);
wil6210_disconnect(wil, mac);
mutex_unlock(&wil->mutex);
return 0;
}

View File

@ -398,6 +398,44 @@ static const struct file_operations fops_reset = {
.open = simple_open,
};
static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
const char *prefix)
{
char printbuf[16 * 3 + 2];
int i = 0;
while (i < len) {
int l = min(len - i, 16);
hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
sizeof(printbuf), false);
seq_printf(s, "%s%s\n", prefix, printbuf);
i += l;
}
}
static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
{
int i = 0;
int len = skb_headlen(skb);
void *p = skb->data;
int nr_frags = skb_shinfo(skb)->nr_frags;
seq_printf(s, " len = %d\n", len);
wil_seq_hexdump(s, p, len, " : ");
if (nr_frags) {
seq_printf(s, " nr_frags = %d\n", nr_frags);
for (i = 0; i < nr_frags; i++) {
const struct skb_frag_struct *frag =
&skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
p = skb_frag_address_safe(frag);
seq_printf(s, " [%2d] : len = %d\n", i, len);
wil_seq_hexdump(s, p, len, " : ");
}
}
}
/*---------Tx/Rx descriptor------------*/
static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
{
@ -438,26 +476,9 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, " SKB = %p\n", skb);
if (skb) {
char printbuf[16 * 3 + 2];
int i = 0;
int len = le16_to_cpu(d->dma.length);
void *p = skb->data;
if (len != skb_headlen(skb)) {
seq_printf(s, "!!! len: desc = %d skb = %d\n",
len, skb_headlen(skb));
len = min_t(int, len, skb_headlen(skb));
}
seq_printf(s, " len = %d\n", len);
while (i < len) {
int l = min(len - i, 16);
hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
sizeof(printbuf), false);
seq_printf(s, " : %s\n", printbuf);
i += l;
}
skb_get(skb);
wil_seq_print_skb(s, skb);
kfree_skb(skb);
}
seq_printf(s, "}\n");
} else {
@ -631,7 +652,8 @@ static int wil_sta_debugfs_show(struct seq_file *s, void *data)
status = "connected";
break;
}
seq_printf(s, "[%d] %pM %s\n", i, p->addr, status);
seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
(p->data_port_open ? " data_port_open" : ""));
if (p->status == wil_sta_connected) {
for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {

View File

@ -195,8 +195,12 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
wil_dbg_irq(wil, "RX done\n");
isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
wil_dbg_txrx(wil, "NAPI schedule\n");
napi_schedule(&wil->napi_rx);
if (test_bit(wil_status_reset_done, &wil->status)) {
wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
napi_schedule(&wil->napi_rx);
} else {
wil_err(wil, "Got Rx interrupt while in reset\n");
}
}
if (isr)
@ -226,10 +230,15 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
wil_dbg_irq(wil, "TX done\n");
napi_schedule(&wil->napi_tx);
isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
/* clear also all VRING interrupts */
isr &= ~(BIT(25) - 1UL);
if (test_bit(wil_status_reset_done, &wil->status)) {
wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
napi_schedule(&wil->napi_tx);
} else {
wil_err(wil, "Got Tx interrupt while in reset\n");
}
}
if (isr)
@ -319,6 +328,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
if (isr & ISR_MISC_FW_ERROR) {
wil_notify_fw_error(wil);
isr &= ~ISR_MISC_FW_ERROR;
wil_fw_error_recovery(wil);
}
if (isr & ISR_MISC_MBOX_EVT) {
@ -493,6 +503,23 @@ free0:
return rc;
}
/* can't use wil_ioread32_and_clear because ICC value is not set yet */
static inline void wil_clear32(void __iomem *addr)
{
u32 x = ioread32(addr);
iowrite32(x, addr);
}
void wil6210_clear_irq(struct wil6210_priv *wil)
{
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
}
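
wil_clear32() reads ICR and writes the value straight back; this only clears anything because the ICR registers are write-1-to-clear (an assumption consistent with how the existing wil_ioread32_and_clear helper behaves). A minimal model, with a plain variable standing in for the MMIO register:

	/* Model of a write-1-to-clear interrupt-cause register. */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t icr = 0x0000000a;	/* two pending causes (hypothetical) */

	static uint32_t rd(void) { return icr; }
	static void wr(uint32_t v) { icr &= ~v; }	/* W1C: written 1s ack causes */

	int main(void)
	{
		wr(rd());	/* the wil_clear32() pattern: read, write back */
		assert(icr == 0);
		printf("icr=0x%08x\n", icr);
		return 0;
	}
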
int wil6210_init_irq(struct wil6210_priv *wil, int irq)
{

View File

@ -21,6 +21,10 @@
#include "wil6210.h"
#include "txrx.h"
static bool no_fw_recovery;
module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_fw_recovery, " disable FW error recovery");
/*
* Due to a hardware issue,
* one has to read/write to/from NIC in 32-bit chunks;
@ -59,6 +63,7 @@ static void wil_disconnect_cid(struct wil6210_priv *wil, int cid)
uint i;
struct wil_sta_info *sta = &wil->sta[cid];
sta->data_port_open = false;
if (sta->status != wil_sta_unused) {
wmi_disconnect_sta(wil, sta->addr, WLAN_REASON_DEAUTH_LEAVING);
sta->status = wil_sta_unused;
@ -112,8 +117,6 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
GFP_KERNEL);
}
clear_bit(wil_status_fwconnecting, &wil->status);
wil_dbg_misc(wil, "clear_bit(wil_status_dontscan)\n");
clear_bit(wil_status_dontscan, &wil->status);
break;
default:
/* AP-like interface and monitor:
@ -130,7 +133,9 @@ static void wil_disconnect_worker(struct work_struct *work)
struct wil6210_priv *wil = container_of(work,
struct wil6210_priv, disconnect_worker);
mutex_lock(&wil->mutex);
_wil6210_disconnect(wil, NULL);
mutex_unlock(&wil->mutex);
}
static void wil_connect_timer_fn(ulong x)
@ -145,6 +150,38 @@ static void wil_connect_timer_fn(ulong x)
schedule_work(&wil->disconnect_worker);
}
static void wil_fw_error_worker(struct work_struct *work)
{
struct wil6210_priv *wil = container_of(work,
struct wil6210_priv, fw_error_worker);
struct wireless_dev *wdev = wil->wdev;
wil_dbg_misc(wil, "fw error worker\n");
if (no_fw_recovery)
return;
mutex_lock(&wil->mutex);
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_MONITOR:
wil_info(wil, "fw error recovery started...\n");
wil_reset(wil);
/* need to re-allocate Rx ring after reset */
wil_rx_init(wil);
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
/* recovery in these modes is done by upper layers */
break;
default:
break;
}
mutex_unlock(&wil->mutex);
}
static int wil_find_free_vring(struct wil6210_priv *wil)
{
int i;
@ -197,6 +234,7 @@ int wil_priv_init(struct wil6210_priv *wil)
INIT_WORK(&wil->connect_worker, wil_connect_worker);
INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
INIT_LIST_HEAD(&wil->pending_wmi_ev);
spin_lock_init(&wil->wmi_ev_lock);
@ -223,7 +261,10 @@ void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
void wil_priv_deinit(struct wil6210_priv *wil)
{
cancel_work_sync(&wil->disconnect_worker);
cancel_work_sync(&wil->fw_error_worker);
mutex_lock(&wil->mutex);
wil6210_disconnect(wil, NULL);
mutex_unlock(&wil->mutex);
wmi_event_flush(wil);
destroy_workqueue(wil->wmi_wq_conn);
destroy_workqueue(wil->wmi_wq);
@ -231,40 +272,78 @@ void wil_priv_deinit(struct wil6210_priv *wil)
static void wil_target_reset(struct wil6210_priv *wil)
{
int delay = 0;
u32 hw_state;
u32 rev_id;
wil_dbg_misc(wil, "Resetting...\n");
/* register read */
#define R(a) ioread32(wil->csr + HOSTADDR(a))
/* register write */
#define W(a, v) iowrite32(v, wil->csr + HOSTADDR(a))
/* register set = read, OR, write */
#define S(a, v) iowrite32(ioread32(wil->csr + HOSTADDR(a)) | v, \
wil->csr + HOSTADDR(a))
#define S(a, v) W(a, R(a) | v)
/* register clear = read, AND with inverted, write */
#define C(a, v) W(a, R(a) & ~v)
wil->hw_version = R(RGF_USER_FW_REV_ID);
rev_id = wil->hw_version & 0xff;
/* hpal_perst_from_pad_src_n_mask */
S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(6));
/* car_perst_rst_src_n_mask */
S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(7));
wmb(); /* order is important here */
W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */
W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */
wmb(); /* order is important here */
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00);
wmb(); /* order is important here */
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
wmb(); /* order is important here */
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
if (rev_id == 1) {
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
} else {
W(RGF_PCIE_LOS_COUNTER_CTL, BIT(6) | BIT(8));
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
}
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
wmb(); /* order is important here */
wil_dbg_misc(wil, "Reset completed\n");
/* wait until device ready */
do {
msleep(1);
hw_state = R(RGF_USER_HW_MACHINE_STATE);
if (delay++ > 100) {
wil_err(wil, "Reset not completed, hw_state 0x%08x\n",
hw_state);
return;
}
} while (hw_state != HW_MACHINE_BOOT_DONE);
if (rev_id == 2)
W(RGF_PCIE_LOS_COUNTER_CTL, BIT(8));
C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
wmb(); /* order is important here */
wil_dbg_misc(wil, "Reset completed in %d ms\n", delay);
#undef R
#undef W
#undef S
#undef C
}
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
@ -299,11 +378,24 @@ int wil_reset(struct wil6210_priv *wil)
{
int rc;
WARN_ON(!mutex_is_locked(&wil->mutex));
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL);
wil->status = 0; /* prevent NAPI from being scheduled */
if (test_bit(wil_status_napi_en, &wil->status)) {
napi_synchronize(&wil->napi_rx);
}
if (wil->scan_request) {
wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
wil->scan_request);
cfg80211_scan_done(wil->scan_request, true);
wil->scan_request = NULL;
}
wil6210_disable_irq(wil);
wil->status = 0;
wmi_event_flush(wil);
@ -313,6 +405,8 @@ int wil_reset(struct wil6210_priv *wil)
/* TODO: put MAC in reset */
wil_target_reset(wil);
wil_rx_fini(wil);
/* init after reset */
wil->pending_connect_cid = -1;
reinit_completion(&wil->wmi_ready);
@ -326,6 +420,11 @@ int wil_reset(struct wil6210_priv *wil)
return rc;
}
void wil_fw_error_recovery(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "starting fw error recovery\n");
schedule_work(&wil->fw_error_worker);
}
void wil_link_on(struct wil6210_priv *wil)
{
@ -353,6 +452,8 @@ static int __wil_up(struct wil6210_priv *wil)
struct wireless_dev *wdev = wil->wdev;
int rc;
WARN_ON(!mutex_is_locked(&wil->mutex));
rc = wil_reset(wil);
if (rc)
return rc;
@ -394,6 +495,7 @@ static int __wil_up(struct wil6210_priv *wil)
napi_enable(&wil->napi_rx);
napi_enable(&wil->napi_tx);
set_bit(wil_status_napi_en, &wil->status);
return 0;
}
@ -411,6 +513,9 @@ int wil_up(struct wil6210_priv *wil)
static int __wil_down(struct wil6210_priv *wil)
{
WARN_ON(!mutex_is_locked(&wil->mutex));
clear_bit(wil_status_napi_en, &wil->status);
napi_disable(&wil->napi_rx);
napi_disable(&wil->napi_tx);

View File

@ -127,8 +127,9 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
ndev->netdev_ops = &wil_netdev_ops;
ndev->ieee80211_ptr = wdev;
ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
NETIF_F_SG | NETIF_F_GRO;
ndev->features |= ndev->hw_features;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;

View File

@ -68,10 +68,14 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
goto stop_master;
/* need reset here to obtain MAC */
mutex_lock(&wil->mutex);
rc = wil_reset(wil);
mutex_unlock(&wil->mutex);
if (rc)
goto release_irq;
wil_info(wil, "HW version: 0x%08x\n", wil->hw_version);
return 0;
release_irq:
@ -149,6 +153,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, wil);
wil->pdev = pdev;
wil6210_clear_irq(wil);
/* FW should raise IRQ when ready */
rc = wil_if_pcie_enable(wil);
if (rc) {

View File

@ -104,6 +104,23 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
return 0;
}
static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
struct wil_ctx *ctx)
{
dma_addr_t pa = wil_desc_addr(&d->dma.addr);
u16 dmalen = le16_to_cpu(d->dma.length);
switch (ctx->mapped_as) {
case wil_mapped_as_single:
dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
break;
case wil_mapped_as_page:
dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
break;
default:
break;
}
}
static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
int tx)
{
@ -122,15 +139,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
ctx = &vring->ctx[vring->swtail];
*d = *_d;
pa = wil_desc_addr(&d->dma.addr);
dmalen = le16_to_cpu(d->dma.length);
if (vring->ctx[vring->swtail].mapped_as_page) {
dma_unmap_page(dev, pa, dmalen,
DMA_TO_DEVICE);
} else {
dma_unmap_single(dev, pa, dmalen,
DMA_TO_DEVICE);
}
wil_txdesc_unmap(dev, d, ctx);
if (ctx->skb)
dev_kfree_skb_any(ctx->skb);
vring->swtail = wil_vring_next_tail(vring);
@ -479,7 +488,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
*/
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
int rc;
gro_result_t rc;
struct wil6210_priv *wil = ndev_to_wil(ndev);
unsigned int len = skb->len;
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
@ -488,17 +497,17 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
skb_orphan(skb);
rc = netif_receive_skb(skb);
rc = napi_gro_receive(&wil->napi_rx, skb);
if (likely(rc == NET_RX_SUCCESS)) {
if (unlikely(rc == GRO_DROP)) {
ndev->stats.rx_dropped++;
stats->rx_dropped++;
wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
} else {
ndev->stats.rx_packets++;
stats->rx_packets++;
ndev->stats.rx_bytes += len;
stats->rx_bytes += len;
} else {
ndev->stats.rx_dropped++;
stats->rx_dropped++;
}
}
@ -548,6 +557,11 @@ int wil_rx_init(struct wil6210_priv *wil)
struct vring *vring = &wil->vring_rx;
int rc;
if (vring->va) {
wil_err(wil, "Rx ring already allocated\n");
return -EINVAL;
}
vring->size = WIL6210_RX_RING_SIZE;
rc = wil_vring_alloc(wil, vring);
if (rc)
@ -588,7 +602,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
.ring_size = cpu_to_le16(size),
},
.ringid = id,
.cidxtid = (cid & 0xf) | ((tid & 0xf) << 4),
.cidxtid = mk_cidxtid(cid, tid),
.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
.mac_ctrl = 0,
.to_resolution = 0,
@ -604,6 +618,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
struct wmi_vring_cfg_done_event cmd;
} __packed reply;
struct vring *vring = &wil->vring_tx[id];
struct vring_tx_data *txdata = &wil->vring_tx_data[id];
if (vring->va) {
wil_err(wil, "Tx ring [%d] already allocated\n", id);
@ -611,6 +626,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
goto out;
}
memset(txdata, 0, sizeof(*txdata));
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
@ -634,6 +650,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
}
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
txdata->enabled = 1;
return 0;
out_free:
wil_vring_free(wil, vring, 1);
@ -646,9 +664,16 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
struct vring *vring = &wil->vring_tx[id];
WARN_ON(!mutex_is_locked(&wil->mutex));
if (!vring->va)
return;
/* make sure NAPI won't touch this vring */
wil->vring_tx_data[id].enabled = 0;
if (test_bit(wil_status_napi_en, &wil->status))
napi_synchronize(&wil->napi_tx);
wil_vring_free(wil, vring, 1);
}
@ -662,6 +687,10 @@ static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
if (cid < 0)
return NULL;
if (!wil->sta[cid].data_port_open &&
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
return NULL;
/* TODO: fix for multiple TID */
for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
if (wil->vring2cid_tid[i][0] == cid) {
@ -700,12 +729,19 @@ static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
struct vring *v, *v2;
struct sk_buff *skb2;
int i;
u8 cid;
/* find 1-st vring */
/* find 1-st vring eligible for data */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
v = &wil->vring_tx[i];
if (v->va)
goto found;
if (!v->va)
continue;
cid = wil->vring2cid_tid[i][0];
if (!wil->sta[cid].data_port_open)
continue;
goto found;
}
wil_err(wil, "Tx while no vrings active?\n");
@ -721,6 +757,10 @@ found:
v2 = &wil->vring_tx[i];
if (!v2->va)
continue;
cid = wil->vring2cid_tid[i][0];
if (!wil->sta[cid].data_port_open)
continue;
skb2 = skb_copy(skb, GFP_ATOMIC);
if (skb2) {
wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
@ -759,6 +799,13 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
return 0;
}
static inline
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
d->mac.d[2] |= ((nr_frags + 1) <<
MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}
static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
struct vring_tx_desc *d,
struct sk_buff *skb)
@ -823,8 +870,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
wil_dbg_txrx(wil, "%s()\n", __func__);
if (avail < vring->size/8)
netif_tx_stop_all_queues(wil_to_ndev(wil));
if (avail < 1 + nr_frags) {
wil_err(wil, "Tx ring full. No space for %d fragments\n",
1 + nr_frags);
@ -842,6 +887,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
if (unlikely(dma_mapping_error(dev, pa)))
return -EINVAL;
vring->ctx[i].mapped_as = wil_mapped_as_single;
/* 1-st segment */
wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
/* Process TCP/UDP checksum offloading */
@ -851,8 +897,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
goto dma_error;
}
d->mac.d[2] |= ((nr_frags + 1) <<
MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
vring->ctx[i].nr_frags = nr_frags;
wil_tx_desc_set_nr_frags(d, nr_frags);
if (nr_frags)
*_d = *d;
@ -867,8 +913,13 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, pa)))
goto dma_error;
vring->ctx[i].mapped_as = wil_mapped_as_page;
wil_tx_desc_map(d, pa, len, vring_index);
vring->ctx[i].mapped_as_page = 1;
/* no need to check return code -
* if it succeeded for 1-st descriptor,
* it will succeed here too
*/
wil_tx_desc_offload_cksum_set(wil, d, skb);
*_d = *d;
}
/* for the last seg only */
@ -897,7 +948,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
/* unmap what we have mapped */
nr_frags = f + 1; /* frags mapped + one for skb head */
for (f = 0; f < nr_frags; f++) {
u16 dmalen;
struct wil_ctx *ctx;
i = (swhead + f) % vring->size;
@ -905,12 +955,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
_d = &(vring->va[i].tx);
*d = *_d;
_d->dma.status = TX_DMA_STATUS_DU;
pa = wil_desc_addr(&d->dma.addr);
dmalen = le16_to_cpu(d->dma.length);
if (ctx->mapped_as_page)
dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
else
dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
wil_txdesc_unmap(dev, d, ctx);
if (ctx->skb)
dev_kfree_skb_any(ctx->skb);
@ -927,11 +972,15 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct wil6210_priv *wil = ndev_to_wil(ndev);
struct ethhdr *eth = (void *)skb->data;
struct vring *vring;
static bool pr_once_fw;
int rc;
wil_dbg_txrx(wil, "%s()\n", __func__);
if (!test_bit(wil_status_fwready, &wil->status)) {
wil_err(wil, "FW not ready\n");
if (!pr_once_fw) {
wil_err(wil, "FW not ready\n");
pr_once_fw = true;
}
goto drop;
}
if (!test_bit(wil_status_fwconnected, &wil->status)) {
@ -942,6 +991,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
wil_err(wil, "Xmit in monitor mode not supported\n");
goto drop;
}
pr_once_fw = false;
/* find vring */
if (is_unicast_ether_addr(eth->h_dest)) {
@ -956,6 +1006,10 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* set up vring entry */
rc = wil_tx_vring(wil, vring, skb);
/* do we still have enough room in the vring? */
if (wil_vring_avail_tx(vring) < vring->size/8)
netif_tx_stop_all_queues(wil_to_ndev(wil));
switch (rc) {
case 0:
/* statistics will be updated on the tx_complete */
@ -985,69 +1039,82 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
struct net_device *ndev = wil_to_ndev(wil);
struct device *dev = wil_to_dev(wil);
struct vring *vring = &wil->vring_tx[ringid];
struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
int done = 0;
int cid = wil->vring2cid_tid[ringid][0];
struct wil_net_stats *stats = &wil->sta[cid].stats;
volatile struct vring_tx_desc *_d;
if (!vring->va) {
wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
return 0;
}
if (!txdata->enabled) {
wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
return 0;
}
wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
while (!wil_vring_is_empty(vring)) {
volatile struct vring_tx_desc *_d =
&vring->va[vring->swtail].tx;
struct vring_tx_desc dd, *d = &dd;
dma_addr_t pa;
u16 dmalen;
int new_swtail;
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
struct sk_buff *skb = ctx->skb;
/**
* For a fragmented skb, HW sets the DU bit only for the
* last fragment; look for it there.
*/
int lf = (vring->swtail + ctx->nr_frags) % vring->size;
/* TODO: check we are not past head */
*d = *_d;
if (!(d->dma.status & TX_DMA_STATUS_DU))
_d = &vring->va[lf].tx;
if (!(_d->dma.status & TX_DMA_STATUS_DU))
break;
dmalen = le16_to_cpu(d->dma.length);
trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
d->dma.error);
wil_dbg_txrx(wil,
"Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
vring->swtail, dmalen, d->dma.status,
d->dma.error);
wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
new_swtail = (lf + 1) % vring->size;
while (vring->swtail != new_swtail) {
struct vring_tx_desc dd, *d = &dd;
u16 dmalen;
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
struct sk_buff *skb = ctx->skb;
_d = &vring->va[vring->swtail].tx;
pa = wil_desc_addr(&d->dma.addr);
if (ctx->mapped_as_page)
dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
else
dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
*d = *_d;
if (skb) {
if (d->dma.error == 0) {
ndev->stats.tx_packets++;
stats->tx_packets++;
ndev->stats.tx_bytes += skb->len;
stats->tx_bytes += skb->len;
} else {
ndev->stats.tx_errors++;
stats->tx_errors++;
dmalen = le16_to_cpu(d->dma.length);
trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
d->dma.error);
wil_dbg_txrx(wil,
"Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
vring->swtail, dmalen, d->dma.status,
d->dma.error);
wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
wil_txdesc_unmap(dev, d, ctx);
if (skb) {
if (d->dma.error == 0) {
ndev->stats.tx_packets++;
stats->tx_packets++;
ndev->stats.tx_bytes += skb->len;
stats->tx_bytes += skb->len;
} else {
ndev->stats.tx_errors++;
stats->tx_errors++;
}
dev_kfree_skb_any(skb);
}
dev_kfree_skb_any(skb);
memset(ctx, 0, sizeof(*ctx));
/* There is no need to touch HW descriptor:
* - status bit TX_DMA_STATUS_DU is set by design,
* so hardware will not try to process this desc.,
* - rest of descriptor will be initialized on Tx.
*/
vring->swtail = wil_vring_next_tail(vring);
done++;
}
memset(ctx, 0, sizeof(*ctx));
/*
* There is no need to touch HW descriptor:
* - status bit TX_DMA_STATUS_DU is set by design,
* so hardware will not try to process this desc.,
* - rest of descriptor will be initialized on Tx.
*/
vring->swtail = wil_vring_next_tail(vring);
done++;
}
if (wil_vring_avail_tx(vring) > vring->size/4)
netif_tx_wake_all_queues(wil_to_ndev(wil));
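
To make the reworked completion flow concrete, here is a standalone sketch of the ring-index arithmetic (hypothetical state: a 16-entry vring holding a 4-descriptor packet, head at swtail 14 plus 3 fragments):

	/* Standalone sketch of the last-fragment lookup above: the DU bit
	 * is checked at lf = (swtail + nr_frags) % size, then all entries
	 * up to lf are reclaimed. Values are hypothetical. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int size = 16, swtail = 14, nr_frags = 3;
		unsigned int lf = (swtail + nr_frags) % size;		/* 1 */
		unsigned int new_swtail = (lf + 1) % size;		/* 2 */

		while (swtail != new_swtail) {
			printf("complete descriptor %u\n", swtail);	/* 14 15 0 1 */
			swtail = (swtail + 1) % size;
		}
		printf("lf=%u new_swtail=%u\n", lf, new_swtail);
		return 0;
	}
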

View File

@ -74,23 +74,21 @@ struct RGF_ICR {
} __packed;
/* registers - FW addresses */
#define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
#define RGF_USER_USER_ICR (0x880b4c) /* struct RGF_ICR */
#define BIT_USER_USER_ICR_SW_INT_2 BIT(18)
#define RGF_USER_CLKS_CTL_SW_RST_MASK_0 (0x880b14)
#define RGF_USER_MAC_CPU_0 (0x8801fc)
#define RGF_USER_HW_MACHINE_STATE (0x8801dc)
#define HW_MACHINE_BOOT_DONE (0x3fffffd)
#define RGF_USER_USER_CPU_0 (0x8801e0)
#define RGF_USER_MAC_CPU_0 (0x8801fc)
#define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
#define RGF_USER_FW_REV_ID (0x880a8c) /* chip revision */
#define RGF_USER_CLKS_CTL_0 (0x880abc)
#define BIT_USER_CLKS_RST_PWGD BIT(11) /* reset on "power good" */
#define RGF_USER_CLKS_CTL_SW_RST_VEC_0 (0x880b04)
#define RGF_USER_CLKS_CTL_SW_RST_VEC_1 (0x880b08)
#define RGF_USER_CLKS_CTL_SW_RST_VEC_2 (0x880b0c)
#define RGF_USER_CLKS_CTL_SW_RST_VEC_3 (0x880b10)
#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
#define RGF_DMA_PSEUDO_CAUSE_MASK_FW (0x881c70)
#define BIT_DMA_PSEUDO_CAUSE_RX BIT(0)
#define BIT_DMA_PSEUDO_CAUSE_TX BIT(1)
#define BIT_DMA_PSEUDO_CAUSE_MISC BIT(2)
#define RGF_USER_CLKS_CTL_SW_RST_MASK_0 (0x880b14)
#define RGF_USER_USER_ICR (0x880b4c) /* struct RGF_ICR */
#define BIT_USER_USER_ICR_SW_INT_2 BIT(18)
#define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */
#define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0)
@ -105,13 +103,22 @@ struct RGF_ICR {
/* Interrupt moderation control */
#define RGF_DMA_ITR_CNT_TRSH (0x881c5c)
#define RGF_DMA_ITR_CNT_DATA (0x881c60)
#define RGF_DMA_ITR_CNT_CRL (0x881C64)
#define RGF_DMA_ITR_CNT_CRL (0x881c64)
#define BIT_DMA_ITR_CNT_CRL_EN BIT(0)
#define BIT_DMA_ITR_CNT_CRL_EXT_TICK BIT(1)
#define BIT_DMA_ITR_CNT_CRL_FOREVER BIT(2)
#define BIT_DMA_ITR_CNT_CRL_CLR BIT(3)
#define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4)
#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
#define RGF_DMA_PSEUDO_CAUSE_MASK_FW (0x881c70)
#define BIT_DMA_PSEUDO_CAUSE_RX BIT(0)
#define BIT_DMA_PSEUDO_CAUSE_TX BIT(1)
#define BIT_DMA_PSEUDO_CAUSE_MISC BIT(2)
#define RGF_PCIE_LOS_COUNTER_CTL (0x882dc4)
/* popular locations */
#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
@ -125,6 +132,31 @@ struct RGF_ICR {
/* Hardware definitions end */
/**
* mk_cidxtid - construct @cidxtid field
* @cid: CID value
* @tid: TID value
*
* @cidxtid field encoded as bits 0..3 - CID; 4..7 - TID
*/
static inline u8 mk_cidxtid(u8 cid, u8 tid)
{
return ((tid & 0xf) << 4) | (cid & 0xf);
}
/**
* parse_cidxtid - parse @cidxtid field
* @cid: store CID value here
* @tid: store TID value here
*
* @cidxtid field encoded as bits 0..3 - CID; 4..7 - TID
*/
static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
{
*cid = cidxtid & 0xf;
*tid = (cidxtid >> 4) & 0xf;
}
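/* Purely illustrative self-test for the two helpers above - not part of
 * the driver. mk_cidxtid(5, 3) packs to 0x35 (TID in the high nibble,
 * CID in the low nibble) and parse_cidxtid() recovers both values.
 */
static inline void cidxtid_selftest(void)
{
	u8 cid, tid;

	parse_cidxtid(mk_cidxtid(5, 3), &cid, &tid);
	WARN_ON(cid != 5 || tid != 3);
}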
struct wil6210_mbox_ring {
u32 base;
u16 entry_size; /* max. size of mbox entry, incl. all headers */
@ -184,12 +216,19 @@ struct pending_wmi_event {
} __packed event;
};
enum { /* for wil_ctx.mapped_as */
wil_mapped_as_none = 0,
wil_mapped_as_single = 1,
wil_mapped_as_page = 2,
};
/**
* struct wil_ctx - software context for Vring descriptor
*/
struct wil_ctx {
struct sk_buff *skb;
u8 nr_frags;
u8 mapped_as;
};
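/* The tri-state @mapped_as replaces the old mapped_as_page:1 bitfield so
 * the Tx cleanup path can undo dma_map_single() and dma_map_page() with
 * the matching unmap call. A hypothetical sketch of that dispatch (the
 * driver's real helper is wil_txdesc_unmap(); dma_addr/dma_len stand in
 * for values read back from the hardware descriptor):
 */
static void wil_ctx_unmap_sketch(struct device *dev, struct wil_ctx *ctx,
				 dma_addr_t dma_addr, u16 dma_len)
{
	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, dma_addr, dma_len, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, dma_addr, dma_len, DMA_TO_DEVICE);
		break;
	default:
		break; /* wil_mapped_as_none: nothing was mapped */
	}
}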
union vring_desc;
@ -204,6 +243,14 @@ struct vring {
struct wil_ctx *ctx; /* ctx[size] - software context */
};
/**
* Additional data for Tx Vring
*/
struct vring_tx_data {
int enabled;
};
enum { /* for wil6210_priv.status */
wil_status_fwready = 0,
wil_status_fwconnecting,
@ -211,6 +258,7 @@ enum { /* for wil6210_priv.status */
wil_status_dontscan,
wil_status_reset_done,
wil_status_irqen, /* FIXME: interrupts enabled - for debug */
wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
};
struct pci_dev;
@ -296,6 +344,7 @@ struct wil_sta_info {
u8 addr[ETH_ALEN];
enum wil_sta_status status;
struct wil_net_stats stats;
bool data_port_open; /* can send any data, not only EAPOL */
/* Rx BACK */
struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM];
unsigned long tid_rx_timer_expired[BITS_TO_LONGS(WIL_STA_TID_NUM)];
@ -309,6 +358,7 @@ struct wil6210_priv {
void __iomem *csr;
ulong status;
u32 fw_version;
u32 hw_version;
u8 n_mids; /* number of additional MIDs as reported by FW */
/* profile */
u32 monitor_flags;
@ -329,6 +379,7 @@ struct wil6210_priv {
struct workqueue_struct *wmi_wq_conn; /* for connect worker */
struct work_struct connect_worker;
struct work_struct disconnect_worker;
struct work_struct fw_error_worker; /* for FW error recovery */
struct timer_list connect_timer;
int pending_connect_cid;
struct list_head pending_wmi_ev;
@ -343,6 +394,7 @@ struct wil6210_priv {
/* DMA related */
struct vring vring_rx;
struct vring vring_tx[WIL6210_MAX_TX_RINGS];
struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
struct wil_sta_info sta[WIL6210_MAX_CID];
/* scan */
@ -406,6 +458,7 @@ void wil_if_remove(struct wil6210_priv *wil);
int wil_priv_init(struct wil6210_priv *wil);
void wil_priv_deinit(struct wil6210_priv *wil);
int wil_reset(struct wil6210_priv *wil);
void wil_fw_error_recovery(struct wil6210_priv *wil);
void wil_link_on(struct wil6210_priv *wil);
void wil_link_off(struct wil6210_priv *wil);
int wil_up(struct wil6210_priv *wil);
@ -439,6 +492,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on);
int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason);
void wil6210_clear_irq(struct wil6210_priv *wil);
int wil6210_init_irq(struct wil6210_priv *wil, int irq);
void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
void wil6210_disable_irq(struct wil6210_priv *wil);


@ -462,7 +462,9 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
wil->sinfo_gen++;
mutex_lock(&wil->mutex);
wil6210_disconnect(wil, evt->bssid);
mutex_unlock(&wil->mutex);
}
static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
@ -550,9 +552,16 @@ static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
{
struct net_device *ndev = wil_to_ndev(wil);
struct wmi_data_port_open_event *evt = d;
u8 cid = evt->cid;
wil_dbg_wmi(wil, "Link UP for CID %d\n", evt->cid);
wil_dbg_wmi(wil, "Link UP for CID %d\n", cid);
if (cid >= ARRAY_SIZE(wil->sta)) {
wil_err(wil, "Link UP for invalid CID %d\n", cid);
return;
}
wil->sta[cid].data_port_open = true;
netif_carrier_on(ndev);
}
@ -560,10 +569,17 @@ static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
{
struct net_device *ndev = wil_to_ndev(wil);
struct wmi_wbe_link_down_event *evt = d;
u8 cid = evt->cid;
wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
cid, le32_to_cpu(evt->reason));
if (cid >= ARRAY_SIZE(wil->sta)) {
wil_err(wil, "Link DOWN for invalid CID %d\n", cid);
return;
}
wil->sta[cid].data_port_open = false;
netif_carrier_off(ndev);
}


@ -269,26 +269,17 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
break;
}
if (ret)
brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
write ? "write" : "read", fn, addr, ret);
return ret;
}
static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
u8 regsz, void *data, bool write)
{
u8 func;
s32 retry = 0;
int ret;
@ -302,9 +293,9 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
* The rest: function 1 silicon backplane core registers
*/
if ((addr & ~REG_F0_REG_MASK) == 0)
func = SDIO_FUNC_0;
else
func = SDIO_FUNC_1;
do {
if (!write)
@ -312,16 +303,26 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
/* for retry wait for 1 ms till bus get settled down */
if (retry)
usleep_range(1000, 2000);
ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
data, write);
} while (ret != 0 && ret != -ENOMEDIUM &&
retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
if (ret == -ENOMEDIUM)
brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
else if (ret != 0) {
/*
* SleepCSR register access can fail when
* waking up the device so reduce this noise
* in the logs.
*/
if (addr != SBSDIO_FUNC1_SLEEPCSR)
brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
write ? "write" : "read", func, addr, ret);
else
brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
write ? "write" : "read", func, addr, ret);
}
return ret;
}
@ -988,6 +989,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43362)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
SDIO_DEVICE_ID_BROADCOM_4335_4339)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4354)},
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@ -1153,7 +1155,7 @@ static struct sdio_driver brcmf_sdmmc_driver = {
},
};
static int __init brcmf_sdio_pd_probe(struct platform_device *pdev)
{
brcmf_dbg(SDIO, "Enter\n");


@ -504,6 +504,7 @@ static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
ci->pub.ramsize = 0x3c000;
break;
case BCM4339_CHIP_ID:
case BCM4354_CHIP_ID:
ci->pub.ramsize = 0xc0000;
ci->pub.rambase = 0x180000;
break;
@ -1006,6 +1007,10 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
chip = container_of(pub, struct brcmf_chip_priv, pub);
switch (pub->chip) {
case BCM4354_CHIP_ID:
/* explicitly check SR engine enable bit */
pmu_cc3_mask = BIT(2);
/* fall-through */
case BCM43241_CHIP_ID:
case BCM4335_CHIP_ID:
case BCM4339_CHIP_ID:


@ -175,6 +175,7 @@ struct rte_console {
#define SBSDIO_ALP_AVAIL 0x40
/* Status: HT is ready */
#define SBSDIO_HT_AVAIL 0x80
#define SBSDIO_CSR_MASK 0x1F
#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
@ -458,10 +459,11 @@ struct brcmf_sdio {
bool alp_only; /* Don't use HT clock (ALP only) */
u8 *ctrl_frame_buf;
u16 ctrl_frame_len;
bool ctrl_frame_stat;
spinlock_t txq_lock; /* protect bus->txq */
struct semaphore tx_seq_lock; /* protect bus->tx_seq */
wait_queue_head_t ctrl_wait;
wait_queue_head_t dcmd_resp_wait;
@ -578,6 +580,8 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
#define BCM43362_NVRAM_NAME "brcm/brcmfmac43362-sdio.txt"
#define BCM4339_FIRMWARE_NAME "brcm/brcmfmac4339-sdio.bin"
#define BCM4339_NVRAM_NAME "brcm/brcmfmac4339-sdio.txt"
#define BCM4354_FIRMWARE_NAME "brcm/brcmfmac4354-sdio.bin"
#define BCM4354_NVRAM_NAME "brcm/brcmfmac4354-sdio.txt"
MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
@ -597,6 +601,8 @@ MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4354_NVRAM_NAME);
struct brcmf_firmware_names {
u32 chipid;
@ -622,7 +628,8 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
{ BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
{ BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
{ BCM43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
{ BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
{ BCM4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
};
@ -714,16 +721,12 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
int err = 0;
int try_cnt = 0;
brcmf_dbg(TRACE, "Enter\n");
brcmf_dbg(TRACE, "Enter: on=%d\n", on);
wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
/* 1st KSO write goes to AOS wake up core if device is asleep */
brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
wr_val, &err);
if (err) {
brcmf_err("SDIO_AOS KSO write error: %d\n", err);
return err;
}
if (on) {
/* device WAKEUP through KSO:
@ -753,13 +756,19 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
&err);
if (((rd_val & bmask) == cmp_val) && !err)
break;
brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
try_cnt, MAX_KSO_ATTEMPTS, err);
udelay(KSO_WAIT_US);
brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
wr_val, &err);
} while (try_cnt++ < MAX_KSO_ATTEMPTS);
if (try_cnt > 2)
brcmf_dbg(SDIO, "try_cnt=%d rd_val=0x%x err=%d\n", try_cnt,
rd_val, err);
if (try_cnt > MAX_KSO_ATTEMPTS)
brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err);
return err;
}
@ -960,6 +969,7 @@ static int
brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
{
int err = 0;
u8 clkcsr;
brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
(sleep ? "SLEEP" : "WAKE"),
@ -978,8 +988,20 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
atomic_read(&bus->ipend) > 0 ||
(!atomic_read(&bus->fcstate) &&
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
data_ok(bus))) {
err = -EBUSY;
goto done;
}
clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR,
&err);
if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
brcmf_dbg(SDIO, "no clock, set ALP\n");
brcmf_sdiod_regwb(bus->sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR,
SBSDIO_ALP_AVAIL_REQ, &err);
}
err = brcmf_sdio_kso_control(bus, false);
/* disable watchdog */
if (!err)
@ -996,7 +1018,7 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
} else {
brcmf_err("error while changing bus sleep state %d\n",
err);
goto done;
}
}
@ -1008,7 +1030,8 @@ end:
} else {
brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
}
done:
brcmf_dbg(SDIO, "Exit: err=%d\n", err);
return err;
}
@ -2311,13 +2334,15 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
/* Send frames until the limit or some other event */
for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
pkt_num = 1;
__skb_queue_head_init(&pktq);
if (down_interruptible(&bus->tx_seq_lock))
return cnt;
if (bus->txglom)
pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
bus->sdiodev->txglomsz);
pkt_num = min_t(u32, pkt_num,
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
spin_lock_bh(&bus->txq_lock);
for (i = 0; i < pkt_num; i++) {
pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
&prec_out);
@ -2325,11 +2350,15 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
break;
__skb_queue_tail(&pktq, pkt);
}
spin_unlock_bh(&bus->txq_lock);
if (i == 0) {
up(&bus->tx_seq_lock);
break;
}
ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
up(&bus->tx_seq_lock);
cnt += i;
/* In poll mode, need to check for other events */
@ -2358,6 +2387,68 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
return cnt;
}
static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
{
u8 doff;
u16 pad;
uint retries = 0;
struct brcmf_sdio_hdrinfo hd_info = {0};
int ret;
brcmf_dbg(TRACE, "Enter\n");
/* Back the pointer to make room for bus header */
frame -= bus->tx_hdrlen;
len += bus->tx_hdrlen;
/* Add alignment padding (optional for ctl frames) */
doff = ((unsigned long)frame % bus->head_align);
if (doff) {
frame -= doff;
len += doff;
memset(frame + bus->tx_hdrlen, 0, doff);
}
/* Round send length to next SDIO block */
pad = 0;
if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
pad = bus->blocksize - (len % bus->blocksize);
if ((pad > bus->roundup) || (pad >= bus->blocksize))
pad = 0;
} else if (len % bus->head_align) {
pad = bus->head_align - (len % bus->head_align);
}
len += pad;
hd_info.len = len - pad;
hd_info.channel = SDPCM_CONTROL_CHANNEL;
hd_info.dat_offset = doff + bus->tx_hdrlen;
hd_info.seq_num = bus->tx_seq;
hd_info.lastfrm = true;
hd_info.tail_pad = pad;
brcmf_sdio_hdpack(bus, frame, &hd_info);
if (bus->txglom)
brcmf_sdio_update_hwhdr(frame, len);
brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
frame, len, "Tx Frame:\n");
brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
BRCMF_HDRS_ON(),
frame, min_t(u16, len, 16), "TxHdr:\n");
do {
ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
if (ret < 0)
brcmf_sdio_txfail(bus);
else
bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
} while (ret < 0 && retries++ < TXRETRIES);
return ret;
}
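/* Worked example of the padding rules in brcmf_sdio_tx_ctrlframe() above,
 * factored into a standalone helper with illustrative values (not a real
 * dongle configuration): with blocksize 512, roundup 512 and head_align 4,
 * a 1510-byte frame gets pad = 512 - (1510 % 512) = 26 and is sent as
 * 1536 bytes (three full blocks); a 99-byte frame stays below one block,
 * so only the alignment branch applies: pad = 4 - (99 % 4) = 1.
 */
static u16 ctrlframe_pad(u16 len, u16 blocksize, u16 roundup, u16 head_align)
{
	u16 pad = 0;

	if (roundup && blocksize && len > blocksize) {
		/* round the send length up to a whole SDIO block */
		pad = blocksize - (len % blocksize);
		if (pad > roundup || pad >= blocksize)
			pad = 0;
	} else if (len % head_align) {
		/* otherwise only satisfy the bus alignment */
		pad = head_align - (len % head_align);
	}
	return pad;
}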
static void brcmf_sdio_bus_stop(struct device *dev)
{
u32 local_hostintmask;
@ -2591,26 +2682,23 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
brcmf_sdio_clrintr(bus);
if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
(down_interruptible(&bus->tx_seq_lock) == 0)) {
if (data_ok(bus)) {
sdio_claim_host(bus->sdiodev->func[1]);
err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
bus->ctrl_frame_len);
sdio_release_host(bus->sdiodev->func[1]);
bus->ctrl_frame_stat = false;
brcmf_sdio_wait_event_wakeup(bus);
}
up(&bus->tx_seq_lock);
}
/* Send queued frames (limit 1 if rx may still be pending) */
if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit &&
data_ok(bus)) {
framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
txlimit;
brcmf_sdio_sendfromq(bus, framecnt);
@ -2644,7 +2732,6 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter: pkt: data %p len %d\n", pkt->data, pkt->len);
@ -2660,7 +2747,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
bus->sdcnt.fcqueued++;
/* Priority based enq */
spin_lock_bh(&bus->txq_lock);
/* reset bus_flags in packet cb */
*(u16 *)(pkt->cb) = 0;
if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
@ -2675,7 +2762,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
bus->txoff = true;
brcmf_txflowblock(bus->sdiodev->dev, true);
}
spin_unlock_bh(&bus->txq_lock);
#ifdef DEBUG
if (pktq_plen(&bus->txq, prec) > qcount[prec])
@ -2770,87 +2857,27 @@ break2:
}
#endif /* DEBUG */
static int
brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
int ret = -1;
brcmf_dbg(TRACE, "Enter\n");
if (down_interruptible(&bus->tx_seq_lock))
return -EINTR;
if (!data_ok(bus)) {
brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
bus->tx_max, bus->tx_seq);
up(&bus->tx_seq_lock);
/* Send from dpc */
bus->ctrl_frame_buf = msg;
bus->ctrl_frame_len = msglen;
bus->ctrl_frame_stat = true;
wait_event_interruptible_timeout(bus->ctrl_wait,
!bus->ctrl_frame_stat,
@ -2861,22 +2888,18 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
ret = 0;
} else {
brcmf_dbg(SDIO, "ctrl_frame_stat == true\n");
bus->ctrl_frame_stat = false;
if (down_interruptible(&bus->tx_seq_lock))
return -EINTR;
ret = -1;
}
}
if (ret == -1) {
sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdio_bus_sleep(bus, false, false);
ret = brcmf_sdio_tx_ctrlframe(bus, msg, msglen);
sdio_release_host(bus->sdiodev->func[1]);
up(&bus->tx_seq_lock);
}
if (ret)
@ -3971,6 +3994,7 @@ brcmf_sdio_watchdog_thread(void *data)
brcmf_sdio_bus_watchdog(bus);
/* Count the tick for reference */
bus->sdcnt.tickcnt++;
reinit_completion(&bus->watchdog_wait);
} else
break;
}
@ -4047,7 +4071,8 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
}
spin_lock_init(&bus->rxctl_lock);
spin_lock_init(&bus->txq_lock);
sema_init(&bus->tx_seq_lock, 1);
init_waitqueue_head(&bus->ctrl_wait);
init_waitqueue_head(&bus->dcmd_resp_wait);


@ -48,6 +48,11 @@
#define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */
/* OBSS Coex Auto/On/Off */
#define BRCMF_OBSS_COEX_AUTO (-1)
#define BRCMF_OBSS_COEX_OFF 0
#define BRCMF_OBSS_COEX_ON 1
enum brcmf_fil_p2p_if_types {
BRCMF_FIL_P2P_IF_CLIENT,
BRCMF_FIL_P2P_IF_GO,
@ -87,6 +92,11 @@ struct brcmf_fil_bss_enable_le {
__le32 enable;
};
struct brcmf_fil_bwcap_le {
__le32 band;
__le32 bw_cap;
};
/**
* struct tdls_iovar - common structure for tdls iovars.
*


@ -797,7 +797,8 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
/* SOCIAL CHANNELS 1, 6, 11 */
search_state = WL_P2P_DISC_ST_SEARCH;
brcmf_dbg(INFO, "P2P SEARCH PHASE START\n");
} else if (dev != NULL &&
vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
/* If you are already a GO, then do SEARCH only */
brcmf_dbg(INFO, "Already a GO. Do SEARCH Only\n");
search_state = WL_P2P_DISC_ST_SEARCH;
@ -2256,7 +2257,6 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
struct brcmf_cfg80211_vif *vif;
enum brcmf_fil_p2p_if_types iftype;
enum wl_mode mode;
int err;
if (brcmf_cfg80211_vif_event_armed(cfg))
@ -2267,11 +2267,9 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
switch (type) {
case NL80211_IFTYPE_P2P_CLIENT:
iftype = BRCMF_FIL_P2P_IF_CLIENT;
mode = WL_MODE_BSS;
break;
case NL80211_IFTYPE_P2P_GO:
iftype = BRCMF_FIL_P2P_IF_GO;
mode = WL_MODE_AP;
break;
case NL80211_IFTYPE_P2P_DEVICE:
return brcmf_p2p_create_p2pdev(&cfg->p2p, wiphy,


@ -191,6 +191,7 @@ static struct ieee80211_supported_band __wl_band_2ghz = {
.n_channels = ARRAY_SIZE(__wl_2ghz_channels),
.bitrates = wl_g_rates,
.n_bitrates = wl_g_rates_size,
.ht_cap = {IEEE80211_HT_CAP_SUP_WIDTH_20_40, true},
};
static struct ieee80211_supported_band __wl_band_5ghz_a = {
@ -494,6 +495,19 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable)
return err;
}
static bool brcmf_is_apmode(struct brcmf_cfg80211_vif *vif)
{
enum nl80211_iftype iftype;
iftype = vif->wdev.iftype;
return iftype == NL80211_IFTYPE_AP || iftype == NL80211_IFTYPE_P2P_GO;
}
static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
{
return vif->wdev.iftype == NL80211_IFTYPE_ADHOC;
}
static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
const char *name,
enum nl80211_iftype type,
@ -654,7 +668,6 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
type);
return -EOPNOTSUPP;
case NL80211_IFTYPE_ADHOC:
vif->mode = WL_MODE_IBSS;
infra = 0;
break;
case NL80211_IFTYPE_STATION:
@ -670,12 +683,10 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
*/
return 0;
}
vif->mode = WL_MODE_BSS;
infra = 1;
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
vif->mode = WL_MODE_AP;
ap = 1;
break;
default:
@ -699,7 +710,7 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
err = -EAGAIN;
goto done;
}
brcmf_dbg(INFO, "IF Type = %s\n", (vif->mode == WL_MODE_IBSS) ?
brcmf_dbg(INFO, "IF Type = %s\n", brcmf_is_ibssmode(vif) ?
"Adhoc" : "Infra");
}
ndev->ieee80211_ptr->iftype = type;
@ -1682,22 +1693,9 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
ext_join_params->ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
memcpy(&ext_join_params->ssid_le.SSID, sme->ssid,
profile->ssid.SSID_len);
/* Set up join scan parameters */
ext_join_params->scan_le.scan_type = -1;
ext_join_params->scan_le.home_time = cpu_to_le32(-1);
if (sme->bssid)
@ -1710,6 +1708,25 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
ext_join_params->assoc_le.chanspec_list[0] =
cpu_to_le16(chanspec);
/* Increase dwell time to receive probe response or detect
* beacon from target AP at a noisy air only during connect
* command.
*/
ext_join_params->scan_le.active_time =
cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
ext_join_params->scan_le.passive_time =
cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
/* To sync with presence period of VSDB GO send probe request
* more frequently. Probe request will be stopped when it gets
* probe response from target AP/GO.
*/
ext_join_params->scan_le.nprobes =
cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
} else {
ext_join_params->scan_le.active_time = cpu_to_le32(-1);
ext_join_params->scan_le.passive_time = cpu_to_le32(-1);
ext_join_params->scan_le.nprobes = cpu_to_le32(-1);
}
err = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params,
@ -1917,7 +1934,7 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
memcpy(key.data, params->key, key.len);
if (!brcmf_is_apmode(ifp->vif) &&
(params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
memcpy(keybuf, &key.data[24], sizeof(keybuf));
@ -2016,7 +2033,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
break;
case WLAN_CIPHER_SUITE_TKIP:
if (!brcmf_is_apmode(ifp->vif)) {
brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
memcpy(keybuf, &key.data[24], sizeof(keybuf));
memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
@ -2177,7 +2194,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
if (!check_vif_up(ifp->vif))
return -EIO;
if (brcmf_is_apmode(ifp->vif)) {
memcpy(&sta_info_le, mac, ETH_ALEN);
err = brcmf_fil_iovar_data_get(ifp, "sta_info",
&sta_info_le,
@ -2194,7 +2211,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
}
brcmf_dbg(TRACE, "STA idle time : %d ms, connected time :%d sec\n",
sinfo->inactive_time, sinfo->connected_time);
} else if (ifp->vif->wdev.iftype == NL80211_IFTYPE_STATION) {
if (memcmp(mac, bssid, ETH_ALEN)) {
brcmf_err("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n",
mac, bssid);
@ -2476,11 +2493,6 @@ CleanUp:
return err;
}
static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
struct brcmf_if *ifp)
{
@ -4253,32 +4265,6 @@ static struct cfg80211_ops wl_cfg80211_ops = {
CFG80211_TESTMODE_CMD(brcmf_cfg80211_testmode)
};
static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type)
{
switch (type) {
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
return -ENOTSUPP;
case NL80211_IFTYPE_ADHOC:
return WL_MODE_IBSS;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
return WL_MODE_BSS;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
return WL_MODE_AP;
case NL80211_IFTYPE_P2P_DEVICE:
return WL_MODE_P2P;
case NL80211_IFTYPE_UNSPECIFIED:
default:
break;
}
return -EINVAL;
}
static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
{
/* scheduled scan settings */
@ -4403,7 +4389,6 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
vif->wdev.wiphy = cfg->wiphy;
vif->wdev.iftype = type;
vif->mode = brcmf_nl80211_iftype_to_mode(type);
vif->pm_block = pm_block;
vif->roam_off = -1;
@ -4697,7 +4682,7 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
s32 err = 0;
u16 reason;
if (brcmf_is_apmode(ifp->vif)) {
err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
} else if (brcmf_is_linkup(e)) {
brcmf_dbg(CONN, "Linkup\n");
@ -4945,6 +4930,30 @@ static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
mutex_init(&event->vif_event_lock);
}
static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
{
struct brcmf_fil_bwcap_le band_bwcap;
u32 val;
int err;
/* verify support for bw_cap command */
val = WLC_BAND_5G;
err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &val);
if (!err) {
/* only set 2G bandwidth using bw_cap command */
band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
sizeof(band_bwcap));
} else {
brcmf_dbg(INFO, "fallback to mimo_bw_cap\n");
val = WLC_N_BW_40ALL;
err = brcmf_fil_iovar_int_set(ifp, "mimo_bw_cap", val);
}
return err;
}
struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
struct device *busdev)
{
@ -5002,6 +5011,17 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
goto cfg80211_p2p_attach_out;
}
/* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(),
* setup 40MHz in 2GHz band and enable OBSS scanning.
*/
if (wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
err = brcmf_enable_bw40_2g(ifp);
if (!err)
err = brcmf_fil_iovar_int_set(ifp, "obss_coex",
BRCMF_OBSS_COEX_AUTO);
}
err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
if (err) {
brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err);


@ -89,21 +89,6 @@ enum brcmf_scan_status {
BRCMF_SCAN_STATUS_SUPPRESS,
};
/**
* enum wl_mode - driver mode of virtual interface.
*
* @WL_MODE_BSS: connects to BSS.
* @WL_MODE_IBSS: operate as ad-hoc.
* @WL_MODE_AP: operate as access-point.
* @WL_MODE_P2P: provide P2P discovery.
*/
enum wl_mode {
WL_MODE_BSS,
WL_MODE_IBSS,
WL_MODE_AP,
WL_MODE_P2P
};
/* dongle configuration */
struct brcmf_cfg80211_conf {
u32 frag_threshold;
@ -193,7 +178,6 @@ struct vif_saved_ie {
* @ifp: lower layer interface pointer
* @wdev: wireless device.
* @profile: profile information.
* @mode: operating mode.
* @roam_off: roaming state.
* @sme_state: SME state using enum brcmf_vif_status bits.
* @pm_block: power-management blocked.
@ -204,7 +188,6 @@ struct brcmf_cfg80211_vif {
struct brcmf_if *ifp;
struct wireless_dev wdev;
struct brcmf_cfg80211_profile profile;
s32 mode;
s32 roam_off;
unsigned long sme_state;
bool pm_block;


@ -426,6 +426,12 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
bool blocked;
int err;
if (!wl->ucode.bcm43xx_bomminor) {
err = brcms_request_fw(wl, wl->wlc->hw->d11core);
if (err)
return -ENOENT;
}
ieee80211_wake_queues(hw);
spin_lock_bh(&wl->lock);
blocked = brcms_rfkill_set_hw_state(wl);
@ -433,14 +439,6 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
if (!blocked)
wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
spin_lock_bh(&wl->lock);
/* avoid acknowledging frames before a non-monitor device is added */
wl->mute_tx = true;
@ -1094,12 +1092,6 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
* Attach to the WL device identified by vendor and device parameters.
* regs is a host accessible memory address pointing to WL device registers.
*
* is called in brcms_bcma_probe() context, therefore no locking required.
*/
static struct brcms_info *brcms_attach(struct bcma_device *pdev)


@ -43,5 +43,6 @@
#define BCM4335_CHIP_ID 0x4335
#define BCM43362_CHIP_ID 43362
#define BCM4339_CHIP_ID 0x4339
#define BCM4354_CHIP_ID 0x4354
#endif /* _BRCM_HW_IDS_H_ */


@ -252,13 +252,17 @@ static void iwl_bg_bt_runtime_config(struct work_struct *work)
struct iwl_priv *priv =
container_of(work, struct iwl_priv, bt_runtime_config);
mutex_lock(&priv->mutex);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
goto out;
/* don't send host command if rf-kill is on */
if (!iwl_is_ready_rf(priv))
goto out;
iwlagn_send_advance_bt_config(priv);
out:
mutex_unlock(&priv->mutex);
}
static void iwl_bg_bt_full_concurrency(struct work_struct *work)
@ -2035,7 +2039,7 @@ static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
ieee80211_free_txskb(priv->hw, skb);
}
static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
@ -2045,6 +2049,8 @@ static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
clear_bit(STATUS_RF_KILL_HW, &priv->status);
wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
return false;
}
static const struct iwl_op_mode_ops iwl_dvm_ops = {


@ -134,6 +134,7 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
.nvm_ver = IWL7260_NVM_VERSION,
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
};
const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
@ -145,6 +146,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.high_temp = true,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
};
const struct iwl_cfg iwl7260_2n_cfg = {
@ -155,6 +157,7 @@ const struct iwl_cfg iwl7260_2n_cfg = {
.nvm_ver = IWL7260_NVM_VERSION,
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
};
const struct iwl_cfg iwl7260_n_cfg = {
@ -165,6 +168,7 @@ const struct iwl_cfg iwl7260_n_cfg = {
.nvm_ver = IWL7260_NVM_VERSION,
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
};
const struct iwl_cfg iwl3160_2ac_cfg = {


@ -262,6 +262,7 @@ struct iwl_cfg {
bool high_temp;
bool d0i3;
u8 nvm_hw_section_num;
bool lp_xtal_workaround;
const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
};


@ -138,6 +138,13 @@
/* Analog phase-lock-loop configuration */
#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
/*
* CSR HW resources monitor registers
*/
#define CSR_MONITOR_CFG_REG (CSR_BASE+0x214)
#define CSR_MONITOR_STATUS_REG (CSR_BASE+0x228)
#define CSR_MONITOR_XTAL_RESOURCES (0x00000010)
/*
* CSR Hardware Revision Workaround Register. Indicates hardware rev;
* "step" determines CCK backoff for txpower calculation. Used for 4965 only.
@ -173,6 +180,7 @@
#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */
#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
@ -240,6 +248,7 @@
* 001 -- MAC power-down
* 010 -- PHY (radio) power-down
* 011 -- Error
* 10: XTAL ON request
* 9-6: SYS_CONFIG
* Indicates current system configuration, reflecting pins on chip
* as forced high/low by device circuit board.
@ -271,6 +280,7 @@
#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
#define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400)
#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
@ -395,6 +405,34 @@
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
/*
* SHR target access (Shared block memory space)
*
* Shared internal registers can be accessed directly from PCI bus through SHR
* arbiter without need for the MAC HW to be powered up. This is possible due to
* indirect read/write via HEEP_CTRL_WRD_PCIEX_CTRL (0xEC) and
* HEEP_CTRL_WRD_PCIEX_DATA (0xF4) registers.
*
* Use iwl_write32()/iwl_read32() family to access these registers. The MAC HW
* need not be powered up so no "grab inc access" is required.
*/
/*
* Registers for accessing shared registers (e.g. SHR_APMG_GP1,
* SHR_APMG_XTAL_CFG). For example, to read from SHR_APMG_GP1 register (0x1DC),
* first, write to the control register:
* HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register)
* HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 2 (read access)
* second, read from the data register HEEP_CTRL_WRD_PCIEX_DATA[31:0].
*
* To write the register, first, write to the data register
* HEEP_CTRL_WRD_PCIEX_DATA[31:0] and then:
* HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register)
* HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 3 (write access)
*/
#define HEEP_CTRL_WRD_PCIEX_CTRL_REG (CSR_BASE+0x0ec)
#define HEEP_CTRL_WRD_PCIEX_DATA_REG (CSR_BASE+0x0f4)
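/* Illustrative helpers following the access sequence documented above; the
 * helper names are ours, not iwlwifi API. Bits [15:0] of the control word
 * carry the SHR offset and bits [29:28] the opcode (2 = read, 3 = write);
 * iwl_read32()/iwl_write32() are the accessors the comment refers to.
 */
static u32 shr_read_sketch(struct iwl_trans *trans, u16 shr_ofs)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((u32)shr_ofs & 0xffff) | (2 << 28));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void shr_write_sketch(struct iwl_trans *trans, u16 shr_ofs, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((u32)shr_ofs & 0xffff) | (3 << 28));
}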
/*
* HBUS (Host-side Bus)
*

View File

@ -125,6 +125,22 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
};
/**
* enum iwl_ucode_tlv_api - ucode api
* @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
*/
enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
};
/**
* enum iwl_ucode_tlv_capa - ucode capabilities
* @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
*/
enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
};
/* The default calibrate table size if not specified by firmware file */
#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19


@ -93,14 +93,14 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
}
IWL_EXPORT_SYMBOL(iwl_poll_direct_bit);
u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
u32 val = iwl_trans_read_prph(trans, ofs);
trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val);
return val;
}
void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val);
iwl_trans_write_prph(trans, ofs, val);


@ -70,7 +70,9 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);


@ -299,9 +299,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
struct ieee80211_sta_vht_cap *vht_cap,
u8 tx_chains, u8 rx_chains)
{
int num_rx_ants = num_of_ant(rx_chains);
int num_tx_ants = num_of_ant(tx_chains);
vht_cap->vht_supported = true;
@ -311,8 +313,10 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
if (num_tx_ants > 1)
vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
else
vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
if (iwlwifi_mod_params.amsdu_size_8K)
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
@ -327,10 +331,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) {
vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
/* this works because NOT_SUPPORTED == 3 */
vht_cap->vht_mcs.rx_mcs_map |=
cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
@ -375,7 +377,8 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
tx_chains, rx_chains);
if (enable_vht)
iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
tx_chains, rx_chains);
if (n_channels != n_used)
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",


@ -119,7 +119,8 @@ struct iwl_cfg;
* @queue_not_full: notifies that a HW queue is not full any more.
* Must be atomic and called with BH disabled.
* @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
* the radio is killed. Return %true if the device should be stopped by
* the transport immediately after the call. May sleep.
* @free_skb: allows the transport layer to free skbs that haven't been
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
@ -144,7 +145,7 @@ struct iwl_op_mode_ops {
struct iwl_device_cmd *cmd);
void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
void (*nic_error)(struct iwl_op_mode *op_mode);
void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
@ -195,11 +196,11 @@ static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
op_mode->ops->queue_not_full(op_mode, queue);
}
static inline bool __must_check
iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
{
might_sleep();
return op_mode->ops->hw_rf_kill(op_mode, state);
}
static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,


@ -95,7 +95,8 @@
#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
#define APMG_PCIDEV_STT_VAL_PERSIST_DIS (0x00000200)
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
#define APMG_RTC_INT_STT_RFKILL (0x10000000)
@ -105,6 +106,26 @@
/* Device NMI register */
#define DEVICE_SET_NMI_REG 0x00a01c30
/* Shared registers (0x0..0x3ff, via target indirect or periphery */
#define SHR_BASE 0x00a10000
/* Shared GP1 register */
#define SHR_APMG_GP1_REG 0x01dc
#define SHR_APMG_GP1_REG_PRPH (SHR_BASE + SHR_APMG_GP1_REG)
#define SHR_APMG_GP1_WF_XTAL_LP_EN 0x00000004
#define SHR_APMG_GP1_CHICKEN_BIT_SELECT 0x80000000
/* Shared DL_CFG register */
#define SHR_APMG_DL_CFG_REG 0x01c4
#define SHR_APMG_DL_CFG_REG_PRPH (SHR_BASE + SHR_APMG_DL_CFG_REG)
#define SHR_APMG_DL_CFG_RTCS_CLK_SELECTOR_MSK 0x000000c0
#define SHR_APMG_DL_CFG_RTCS_CLK_INTERNAL_XTAL 0x00000080
#define SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP 0x00000100
/* Shared APMG_XTAL_CFG register */
#define SHR_APMG_XTAL_CFG_REG 0x1c0
#define SHR_APMG_XTAL_CFG_XTAL_ON_REQ 0x80000000
/*
* Device reset for family 8000
* write to bit 24 in order to reset the CPU


@ -2,8 +2,8 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o
iwlmvm-y += power.o coex.o
iwlmvm-y += led.o tt.o offloading.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o


@ -61,9 +61,11 @@
*
*****************************************************************************/
#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "fw-api-bt-coex.h"
#include "fw-api-coex.h"
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-debug.h"
@ -305,6 +307,215 @@ static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
cpu_to_le32(0x33113311),
};
struct corunning_block_luts {
u8 range;
__le32 lut20[BT_COEX_CORUN_LUT_SIZE];
};
/*
* Ranges for the antenna coupling calibration / co-running block LUT:
* LUT0: [ 0, 12[
* LUT1: [12, 20[
* LUT2: [20, 21[
* LUT3: [21, 23[
* LUT4: [23, 27[
* LUT5: [27, 30[
* LUT6: [30, 32[
* LUT7: [32, 33[
* LUT8: [33, - [
*/
static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 0,
.lut20 = {
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
{
.range = 12,
.lut20 = {
cpu_to_le32(0x00000001), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
{
.range = 20,
.lut20 = {
cpu_to_le32(0x00000002), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
{
.range = 21,
.lut20 = {
cpu_to_le32(0x00000003), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
{
.range = 23,
.lut20 = {
cpu_to_le32(0x00000004), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
{
.range = 27,
.lut20 = {
cpu_to_le32(0x00000005), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
{
.range = 30,
.lut20 = {
cpu_to_le32(0x00000006), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
{
.range = 32,
.lut20 = {
cpu_to_le32(0x00000007), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
{
.range = 33,
.lut20 = {
cpu_to_le32(0x00000008), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
};
static enum iwl_bt_coex_lut_type
iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
{
@ -390,8 +601,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
BT_VALID_LUT |
BT_VALID_WIFI_RX_SW_PRIO_BOOST |
BT_VALID_WIFI_TX_SW_PRIO_BOOST |
BT_VALID_ANT_ISOLATION |
BT_VALID_ANT_ISOLATION_THRS |
BT_VALID_TXTX_DELTA_FREQ_THRS |
@ -401,6 +610,17 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
if (IWL_MVM_BT_COEX_SYNC2SCO)
bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
if (IWL_MVM_BT_COEX_CORUNNING) {
bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
BT_VALID_CORUN_LUT_40);
bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
}
if (IWL_MVM_BT_COEX_MPLUT) {
bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
}
if (mvm->cfg->bt_shared_single_ant)
memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
sizeof(iwl_single_shared_ant));
@ -408,6 +628,12 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
sizeof(iwl_combined_lookup));
/* Take first Co-running block LUT to get started */
memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20,
sizeof(bt_cmd->bt4_corun_lut20));
memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20,
sizeof(bt_cmd->bt4_corun_lut40));
memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
sizeof(iwl_bt_prio_boost));
memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
@ -498,7 +724,7 @@ int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
struct iwl_host_cmd cmd = {
.id = BT_CONFIG,
.len = { sizeof(*bt_cmd), },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
.flags = CMD_ASYNC,
};
struct iwl_mvm_sta *mvmsta;
@ -952,8 +1178,8 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
enum iwl_bt_coex_lut_type lut_type;
@ -989,6 +1215,38 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
}
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac)
{
__le16 fc = hdr->frame_control;
if (info->band != IEEE80211_BAND_2GHZ)
return 0;
if (unlikely(mvm->bt_tx_prio))
return mvm->bt_tx_prio - 1;
/* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
is_multicast_ether_addr(hdr->addr1) ||
ieee80211_is_ctl(fc) || ieee80211_is_mgmt(fc) ||
ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc))
return 3;
switch (ac) {
case IEEE80211_AC_BE:
return 1;
case IEEE80211_AC_VO:
return 3;
case IEEE80211_AC_VI:
return 2;
default:
break;
}
return 0;
}
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
{
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
@ -996,3 +1254,69 @@ void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
iwl_mvm_bt_coex_notif_handle(mvm);
}
int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *dev_cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 ant_isolation = le32_to_cpup((void *)pkt->data);
u8 __maybe_unused lower_bound, upper_bound;
u8 lut;
struct iwl_bt_coex_cmd *bt_cmd;
struct iwl_host_cmd cmd = {
.id = BT_CONFIG,
.len = { sizeof(*bt_cmd), },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
.flags = CMD_SYNC,
};
if (!IWL_MVM_BT_COEX_CORUNNING)
return 0;
lockdep_assert_held(&mvm->mutex);
if (ant_isolation == mvm->last_ant_isol)
return 0;
for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
break;
lower_bound = antenna_coupling_ranges[lut].range;
if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1)
upper_bound = antenna_coupling_ranges[lut + 1].range;
else
upper_bound = antenna_coupling_ranges[lut].range;
IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n",
ant_isolation, lower_bound, upper_bound, lut);
mvm->last_ant_isol = ant_isolation;
if (mvm->last_corun_lut == lut)
return 0;
mvm->last_corun_lut = lut;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
if (!bt_cmd)
return 0;
cmd.data[0] = bt_cmd;
bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
BT_VALID_CORUN_LUT_20 |
BT_VALID_CORUN_LUT_40);
/* For the moment, use the same LUT for 20MHz and 40MHz */
memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20,
sizeof(bt_cmd->bt4_corun_lut20));
memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
sizeof(bt_cmd->bt4_corun_lut40));
return 0;
}


@ -79,8 +79,8 @@
#define IWL_MVM_PS_SNOOZE_WINDOW 50
#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25
#define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT 64
#define IWL_MVM_LOWLAT_SINGLE_BINDING_MAXDUR 24 /* TU */
#define IWL_MVM_LOWLAT_DUAL_BINDING_MAXDUR 24 /* TU */
#define IWL_MVM_BT_COEX_SYNC2SCO 1
#define IWL_MVM_BT_COEX_CORUNNING 1
#define IWL_MVM_BT_COEX_MPLUT 1
#endif /* __MVM_CONSTANTS_H */


@ -376,139 +376,6 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
return err;
}
static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
union {
struct iwl_proto_offload_cmd_v1 v1;
struct iwl_proto_offload_cmd_v2 v2;
struct iwl_proto_offload_cmd_v3_small v3s;
struct iwl_proto_offload_cmd_v3_large v3l;
} cmd = {};
struct iwl_host_cmd hcmd = {
.id = PROT_OFFLOAD_CONFIG_CMD,
.flags = CMD_SYNC,
.data[0] = &cmd,
.dataflags[0] = IWL_HCMD_DFL_DUP,
};
struct iwl_proto_offload_cmd_common *common;
u32 enabled = 0, size;
u32 capa_flags = mvm->fw->ucode_capa.flags;
#if IS_ENABLED(CONFIG_IPV6)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int i;
if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
struct iwl_ns_config *nsc;
struct iwl_targ_addr *addrs;
int n_nsc, n_addrs;
int c;
if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
nsc = cmd.v3s.ns_config;
n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
addrs = cmd.v3s.targ_addrs;
n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
} else {
nsc = cmd.v3l.ns_config;
n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
addrs = cmd.v3l.targ_addrs;
n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
}
if (mvmvif->num_target_ipv6_addrs)
enabled |= IWL_D3_PROTO_OFFLOAD_NS;
/*
* For each address we have (and that will fit), fill a target
* address struct and map it to the NS offload config that shares
* its solicited node address.
*/
for (i = 0, c = 0;
i < mvmvif->num_target_ipv6_addrs &&
i < n_addrs && c < n_nsc; i++) {
struct in6_addr solicited_addr;
int j;
addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
&solicited_addr);
for (j = 0; j < c; j++)
if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
&solicited_addr) == 0)
break;
if (j == c)
c++;
addrs[i].addr = mvmvif->target_ipv6_addrs[i];
addrs[i].config_num = cpu_to_le32(j);
nsc[j].dest_ipv6_addr = solicited_addr;
memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
}
if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
else
cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
if (mvmvif->num_target_ipv6_addrs) {
enabled |= IWL_D3_PROTO_OFFLOAD_NS;
memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
}
BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
sizeof(mvmvif->target_ipv6_addrs[0]));
for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
memcpy(cmd.v2.target_ipv6_addr[i],
&mvmvif->target_ipv6_addrs[i],
sizeof(cmd.v2.target_ipv6_addr[i]));
} else {
if (mvmvif->num_target_ipv6_addrs) {
enabled |= IWL_D3_PROTO_OFFLOAD_NS;
memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
}
BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
sizeof(mvmvif->target_ipv6_addrs[0]));
for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
memcpy(cmd.v1.target_ipv6_addr[i],
&mvmvif->target_ipv6_addrs[i],
sizeof(cmd.v1.target_ipv6_addr[i]));
}
#endif
if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
common = &cmd.v3s.common;
size = sizeof(cmd.v3s);
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
common = &cmd.v3l.common;
size = sizeof(cmd.v3l);
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
common = &cmd.v2.common;
size = sizeof(cmd.v2);
} else {
common = &cmd.v1.common;
size = sizeof(cmd.v1);
}
if (vif->bss_conf.arp_addr_cnt) {
enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
}
if (!enabled)
return 0;
common->enabled = cpu_to_le32(enabled);
hcmd.len[0] = size;
return iwl_mvm_send_cmd(mvm, &hcmd);
}
enum iwl_mvm_tcp_packet_type {
MVM_TCP_TX_SYN,
MVM_TCP_RX_SYNACK,
@ -846,8 +713,8 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
quota_cmd.quotas[0].id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
mvmvif->phy_ctxt->color));
quota_cmd.quotas[0].quota = cpu_to_le32(100);
quota_cmd.quotas[0].max_duration = cpu_to_le32(1000);
quota_cmd.quotas[0].quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
quota_cmd.quotas[0].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
for (i = 1; i < MAX_BINDINGS; i++)
quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
@ -927,6 +794,20 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
IWL_ERR(mvm, "failed to set non-QoS seqno\n");
}
static int
iwl_mvm_send_wowlan_config_cmd(struct iwl_mvm *mvm,
const struct iwl_wowlan_config_cmd_v3 *cmd)
{
/* start only with the v2 part of the command */
u16 cmd_len = sizeof(cmd->common);
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID)
cmd_len = sizeof(*cmd);
return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, CMD_SYNC,
cmd_len, cmd);
}
static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan,
bool test)
@ -939,7 +820,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif;
struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvm_ap_sta;
struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {};
struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
struct iwl_d3_manager_config d3_cfg_cmd_data = {
@ -961,7 +842,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
.tkip = &tkip_cmd,
.use_tkip = false,
};
int ret, i;
int ret;
int len __maybe_unused;
if (!wowlan) {
@ -1002,49 +883,41 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
/* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */
/* TODO: wowlan_config_cmd.common.wowlan_ba_teardown_tids */
wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported;
wowlan_config_cmd.common.is_11n_connection =
ap_sta->ht_cap.ht_supported;
/* Query the last used seqno and set it */
ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
if (ret < 0)
goto out_noreset;
wowlan_config_cmd.non_qos_seq = cpu_to_le16(ret);
wowlan_config_cmd.common.non_qos_seq = cpu_to_le16(ret);
/*
* For QoS counters, we store the one to use next, so subtract 0x10
* since the uCode will add 0x10 *before* using the value while we
* increment after using the value (i.e. store the next value to use).
*/
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
u16 seq = mvm_ap_sta->tid_data[i].seq_number;
seq -= 0x10;
wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq);
}
iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &wowlan_config_cmd.common);
if (wowlan->disconnect)
wowlan_config_cmd.wakeup_filter |=
wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
IWL_WOWLAN_WAKEUP_LINK_CHANGE);
if (wowlan->magic_pkt)
wowlan_config_cmd.wakeup_filter |=
wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
if (wowlan->gtk_rekey_failure)
wowlan_config_cmd.wakeup_filter |=
wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
if (wowlan->eap_identity_req)
wowlan_config_cmd.wakeup_filter |=
wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
if (wowlan->four_way_handshake)
wowlan_config_cmd.wakeup_filter |=
wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
if (wowlan->n_patterns)
wowlan_config_cmd.wakeup_filter |=
wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
if (wowlan->rfkill_release)
wowlan_config_cmd.wakeup_filter |=
wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
if (wowlan->tcp) {
@ -1052,7 +925,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
* Set the "link change" (really "link lost") flag as well
* since that implies losing the TCP connection.
*/
wowlan_config_cmd.wakeup_filter |=
wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
@ -1150,9 +1023,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
}
}
ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION,
CMD_SYNC, sizeof(wowlan_config_cmd),
&wowlan_config_cmd);
ret = iwl_mvm_send_wowlan_config_cmd(mvm, &wowlan_config_cmd);
if (ret)
goto out;
@ -1160,7 +1031,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
ret = iwl_mvm_send_proto_offload(mvm, vif);
ret = iwl_mvm_send_proto_offload(mvm, vif, false, CMD_SYNC);
if (ret)
goto out;


@ -312,6 +312,11 @@ static ssize_t iwl_dbgfs_reduced_txp_write(struct ieee80211_vif *vif,
mutex_lock(&mvm->mutex);
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
if (IS_ERR_OR_NULL(mvmsta)) {
mutex_unlock(&mvm->mutex);
return -ENOTCONN;
}
mvmsta->bt_reduced_txpower_dbg = false;
ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
reduced_tx_power);


@ -60,11 +60,14 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <linux/vmalloc.h>
#include "mvm.h"
#include "sta.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "debugfs.h"
#include "fw-error-dump.h"
static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
@ -117,6 +120,51 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
return ret;
}
static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file)
{
struct iwl_mvm *mvm = inode->i_private;
int ret;
if (!mvm)
return -EINVAL;
mutex_lock(&mvm->mutex);
if (!mvm->fw_error_dump) {
ret = -ENODATA;
goto out;
}
file->private_data = mvm->fw_error_dump;
mvm->fw_error_dump = NULL;
kfree(mvm->fw_error_sram);
mvm->fw_error_sram = NULL;
mvm->fw_error_sram_len = 0;
ret = 0;
out:
mutex_unlock(&mvm->mutex);
return ret;
}
static ssize_t iwl_dbgfs_fw_error_dump_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_fw_error_dump_file *dump_file = file->private_data;
return simple_read_from_buffer(user_buf, count, ppos,
dump_file,
le32_to_cpu(dump_file->file_len));
}
static int iwl_dbgfs_fw_error_dump_release(struct inode *inode,
struct file *file)
{
vfree(file->private_data);
return 0;
}
static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@ -350,6 +398,9 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
le32_to_cpu(notif->secondary_ch_lut));
pos += scnprintf(buf+pos, bufsz-pos, "bt_activity_grading = %d\n",
le32_to_cpu(notif->bt_activity_grading));
pos += scnprintf(buf+pos, bufsz-pos,
"antenna isolation = %d CORUN LUT index = %d\n",
mvm->last_ant_isol, mvm->last_corun_lut);
mutex_unlock(&mvm->mutex);
@ -392,6 +443,22 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t
iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
u32 bt_tx_prio;
if (sscanf(buf, "%u", &bt_tx_prio) != 1)
return -EINVAL;
if (bt_tx_prio > 4)
return -EINVAL;
mvm->bt_tx_prio = bt_tx_prio;
return count;
}
#define PRINT_STATS_LE32(_str, _val) \
pos += scnprintf(buf + pos, bufsz - pos, \
fmt_table, _str, \
@ -536,56 +603,60 @@ static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
loff_t *ppos,
struct iwl_mvm_frame_stats *stats)
{
char *buff;
int pos = 0, idx, i;
char *buff, *pos, *endpos;
int idx, i;
int ret;
size_t bufsz = 1024;
static const size_t bufsz = 1024;
buff = kmalloc(bufsz, GFP_KERNEL);
if (!buff)
return -ENOMEM;
spin_lock_bh(&mvm->drv_stats_lock);
pos += scnprintf(buff + pos, bufsz - pos,
pos = buff;
endpos = pos + bufsz;
pos += scnprintf(pos, endpos - pos,
"Legacy/HT/VHT\t:\t%d/%d/%d\n",
stats->legacy_frames,
stats->ht_frames,
stats->vht_frames);
pos += scnprintf(buff + pos, bufsz - pos, "20/40/80\t:\t%d/%d/%d\n",
pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n",
stats->bw_20_frames,
stats->bw_40_frames,
stats->bw_80_frames);
pos += scnprintf(buff + pos, bufsz - pos, "NGI/SGI\t\t:\t%d/%d\n",
pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n",
stats->ngi_frames,
stats->sgi_frames);
pos += scnprintf(buff + pos, bufsz - pos, "SISO/MIMO2\t:\t%d/%d\n",
pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n",
stats->siso_frames,
stats->mimo2_frames);
pos += scnprintf(buff + pos, bufsz - pos, "FAIL/SCSS\t:\t%d/%d\n",
pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n",
stats->fail_frames,
stats->success_frames);
pos += scnprintf(buff + pos, bufsz - pos, "MPDUs agg\t:\t%d\n",
pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n",
stats->agg_frames);
pos += scnprintf(buff + pos, bufsz - pos, "A-MPDUs\t\t:\t%d\n",
pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n",
stats->ampdu_count);
pos += scnprintf(buff + pos, bufsz - pos, "Avg MPDUs/A-MPDU:\t%d\n",
pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n",
stats->ampdu_count > 0 ?
(stats->agg_frames / stats->ampdu_count) : 0);
pos += scnprintf(buff + pos, bufsz - pos, "Last Rates\n");
pos += scnprintf(pos, endpos - pos, "Last Rates\n");
idx = stats->last_frame_idx - 1;
for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) {
idx = (idx + 1) % ARRAY_SIZE(stats->last_rates);
if (stats->last_rates[idx] == 0)
continue;
pos += scnprintf(buff + pos, bufsz - pos, "Rate[%d]: ",
pos += scnprintf(pos, endpos - pos, "Rate[%d]: ",
(int)(ARRAY_SIZE(stats->last_rates) - i));
pos += rs_pretty_print_rate(buff + pos, stats->last_rates[idx]);
pos += rs_pretty_print_rate(pos, stats->last_rates[idx]);
}
spin_unlock_bh(&mvm->drv_stats_lock);
ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos);
ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
kfree(buff);
return ret;
@ -1032,9 +1103,16 @@ MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
static const struct file_operations iwl_dbgfs_fw_error_dump_ops = {
.open = iwl_dbgfs_fw_error_dump_open,
.read = iwl_dbgfs_fw_error_dump_read,
.release = iwl_dbgfs_fw_error_dump_release,
};
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
@ -1049,12 +1127,15 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
struct dentry *bcast_dir __maybe_unused;
char buf[100];
spin_lock_init(&mvm->drv_stats_lock);
mvm->debugfs_dir = dbgfs_dir;
MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
@ -1064,6 +1145,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, S_IWUSR | S_IRUSR);


@ -77,6 +77,8 @@
* @BT_COEX_3W:
* @BT_COEX_NW:
* @BT_COEX_SYNC2SCO:
* @BT_COEX_CORUNNING:
* @BT_COEX_MPLUT:
*
* The COEX_MODE must be set for each command. Even if it is not changed.
*/
@ -88,6 +90,8 @@ enum iwl_bt_coex_flags {
BT_COEX_3W = 0x2 << BT_COEX_MODE_POS,
BT_COEX_NW = 0x3 << BT_COEX_MODE_POS,
BT_COEX_SYNC2SCO = BIT(7),
BT_COEX_CORUNNING = BIT(8),
BT_COEX_MPLUT = BIT(9),
};
/*


@ -239,7 +239,7 @@ enum iwl_wowlan_wakeup_filters {
IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16),
}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
struct iwl_wowlan_config_cmd {
struct iwl_wowlan_config_cmd_v2 {
__le32 wakeup_filter;
__le16 non_qos_seq;
__le16 qos_seq[8];
@ -247,6 +247,12 @@ struct iwl_wowlan_config_cmd {
u8 is_11n_connection;
} __packed; /* WOWLAN_CONFIG_API_S_VER_2 */
struct iwl_wowlan_config_cmd_v3 {
struct iwl_wowlan_config_cmd_v2 common;
u8 offloading_tid;
u8 reserved[3];
} __packed; /* WOWLAN_CONFIG_API_S_VER_3 */
/*
* WOWLAN_TSC_RSC_PARAMS
*/


@ -76,6 +76,8 @@
* @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
* @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
* @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
* @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored
* on old firmwares).
* @TX_CMD_FLG_BT_DIS: disable BT priority for this frame
* @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control.
* Should be set for mgmt, non-QOS data, mcast, bcast and in scan command
@ -107,6 +109,7 @@ enum iwl_tx_flags {
TX_CMD_FLG_VHT_NDPA = BIT(8),
TX_CMD_FLG_HT_NDPA = BIT(9),
TX_CMD_FLG_CSI_FDBK2HOST = BIT(10),
TX_CMD_FLG_BT_PRIO_POS = 11,
TX_CMD_FLG_BT_DIS = BIT(12),
TX_CMD_FLG_SEQ_CTL = BIT(13),
TX_CMD_FLG_MORE_FRAG = BIT(14),


@ -70,7 +70,7 @@
#include "fw-api-mac.h"
#include "fw-api-power.h"
#include "fw-api-d3.h"
#include "fw-api-bt-coex.h"
#include "fw-api-coex.h"
/* maximal number of Tx queues in any platform */
#define IWL_MVM_MAX_QUEUES 20
@ -95,6 +95,7 @@ enum {
/* PHY context commands */
PHY_CONTEXT_CMD = 0x8,
DBG_CFG = 0x9,
ANTENNA_COUPLING_NOTIFICATION = 0xa,
/* station table */
ADD_STA_KEY = 0x17,


@ -0,0 +1,106 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#ifndef __fw_error_dump_h__
#define __fw_error_dump_h__
#include <linux/types.h>
#define IWL_FW_ERROR_DUMP_BARKER 0x14789632
/**
* enum iwl_fw_error_dump_type - types of data in the dump file
* @IWL_FW_ERROR_DUMP_SRAM:
* @IWL_FW_ERROR_DUMP_REG:
*/
enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_SRAM = 0,
IWL_FW_ERROR_DUMP_REG = 1,
IWL_FW_ERROR_DUMP_MAX,
};
/**
* struct iwl_fw_error_dump_data - data for one type
* @type: %enum iwl_fw_error_dump_type
* @len: the length starting from %data - must be a multiple of 4.
* @data: the data itself, padded to a multiple of 4.
*/
struct iwl_fw_error_dump_data {
__le32 type;
__le32 len;
__u8 data[];
} __packed __aligned(4);
/**
* struct iwl_fw_error_dump_file - the layout of the header of the file
* @barker: must be %IWL_FW_ERROR_DUMP_BARKER
* @file_len: the length of the whole file, starting from %barker
* @data: array of %struct iwl_fw_error_dump_data
*/
struct iwl_fw_error_dump_file {
__le32 barker;
__le32 file_len;
u8 data[0];
} __packed __aligned(4);
#endif /* __fw_error_dump_h__ */
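
The two structs above define a simple TLV-style file: an 8-byte header
(barker plus total length) followed by consecutive data blocks, each
carrying its own type and a 4-byte-aligned length. As a rough
illustration - not part of this commit - a userspace reader could walk
a captured dump as sketched below; the helper name and the fixed-size
buffer are hypothetical, and a little-endian host is assumed since the
fields are __le32.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IWL_FW_ERROR_DUMP_BARKER 0x14789632

/* Hypothetical walker for the dump-file layout defined above.
 * Assumes buf holds the whole file and the host is little-endian. */
static int walk_fw_error_dump(const uint8_t *buf, size_t size)
{
	uint32_t barker, file_len, pos;

	if (size < 8)
		return -1;
	memcpy(&barker, buf, 4);
	memcpy(&file_len, buf + 4, 4);
	if (barker != IWL_FW_ERROR_DUMP_BARKER || file_len > size)
		return -1;

	/* data blocks start right after the 8-byte file header */
	for (pos = 8; pos + 8 <= file_len; ) {
		uint32_t type, len;

		memcpy(&type, buf + pos, 4);
		memcpy(&len, buf + pos + 4, 4);
		if (len > file_len - pos - 8)
			return -1;
		printf("block type=%u len=%u\n", type, len);
		pos += 8 + len;	/* len is already padded to 4 bytes */
	}
	return 0;
}

int main(int argc, char **argv)
{
	static uint8_t buf[1 << 20];
	FILE *f;
	size_t size;

	if (argc < 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	size = fread(buf, 1, sizeof(buf), f);
	fclose(f);
	return walk_fw_error_dump(buf, size) ? 1 : 0;
}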


@ -94,6 +94,8 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
int ret;
switch (mode) {
case IWL_LED_BLINK:
IWL_ERR(mvm, "Blink led mode not supported, using default\n");
case IWL_LED_DEFAULT:
case IWL_LED_RF_STATE:
mode = IWL_LED_RF_STATE;


@ -205,7 +205,7 @@ static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
if (!mvm->trans->cfg->d0i3)
if (!iwl_mvm_is_d0i3_supported(mvm))
return;
IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
@ -215,7 +215,7 @@ void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
if (!mvm->trans->cfg->d0i3)
if (!iwl_mvm_is_d0i3_supported(mvm))
return;
IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
@ -228,7 +228,7 @@ iwl_mvm_unref_all_except(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref)
{
int i;
if (!mvm->trans->cfg->d0i3)
if (!iwl_mvm_is_d0i3_supported(mvm))
return;
for_each_set_bit(i, mvm->ref_bitmap, IWL_MVM_REF_COUNT) {
@ -295,7 +295,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
!iwlwifi_mod_params.sw_crypto)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
hw->uapsd_queues = IWL_UAPSD_AC_INFO;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
@ -365,7 +365,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
@ -375,8 +375,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
NL80211_FEATURE_P2P_GO_OPPPS |
NL80211_FEATURE_LOW_PRIORITY_SCAN;
NL80211_FEATURE_P2P_GO_OPPPS;
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
@ -424,6 +423,47 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
return ret;
}
static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct sk_buff *skb)
{
struct iwl_mvm_sta *mvmsta;
bool defer = false;
/*
* double check the IN_D0I3 flag both before and after
* taking the spinlock, in order to prevent taking
* the spinlock when not needed.
*/
if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
return false;
spin_lock(&mvm->d0i3_tx_lock);
/*
* testing the flag again ensures the skb dequeue
* loop (on d0i3 exit) hasn't run yet.
*/
if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
goto out;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
mvmsta->sta_id != mvm->d0i3_ap_sta_id)
goto out;
__skb_queue_tail(&mvm->d0i3_tx, skb);
ieee80211_stop_queues(mvm->hw);
/* trigger wakeup */
iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
defer = true;
out:
spin_unlock(&mvm->d0i3_tx_lock);
return defer;
}
static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@ -451,6 +491,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
sta = NULL;
if (sta) {
if (iwl_mvm_defer_tx(mvm, sta, skb))
return;
if (iwl_mvm_tx_skb(mvm, skb, sta))
goto drop;
return;
@ -489,6 +531,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
bool tx_agg_ref = false;
IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
sta->addr, tid, action);
@ -496,6 +539,23 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
if (!(mvm->nvm_data->sku_cap_11n_enable))
return -EACCES;
/* return from D0i3 before starting a new Tx aggregation */
if (action == IEEE80211_AMPDU_TX_START) {
iwl_mvm_ref(mvm, IWL_MVM_REF_TX_AGG);
tx_agg_ref = true;
/*
* wait synchronously until D0i3 exit to get the correct
* sequence number for the tid
*/
if (!wait_event_timeout(mvm->d0i3_exit_waitq,
!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), HZ)) {
WARN_ON_ONCE(1);
iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
return -EIO;
}
}
mutex_lock(&mvm->mutex);
switch (action) {
@ -533,6 +593,13 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
}
mutex_unlock(&mvm->mutex);
/*
* If the tid is marked as started, we won't use it for offloaded
* traffic on the next D0i3 entry. It's safe to unref.
*/
if (tx_agg_ref)
iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
return ret;
}
@ -557,6 +624,15 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
#ifdef CONFIG_IWLWIFI_DEBUGFS
static char *env[] = { "DRIVER=iwlwifi", "EVENT=error_dump", NULL };
iwl_mvm_fw_error_dump(mvm);
/* notify the userspace about the error we had */
kobject_uevent_env(&mvm->hw->wiphy->dev.kobj, KOBJ_CHANGE, env);
#endif
iwl_trans_stop_device(mvm->trans);
mvm->scan_status = IWL_MVM_SCAN_NONE;
@ -610,6 +686,7 @@ static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw)
mutex_lock(&mvm->mutex);
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
iwl_mvm_d0i3_enable_tx(mvm, NULL);
ret = iwl_mvm_update_quotas(mvm, NULL);
if (ret)
IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
@ -1255,6 +1332,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*/
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
BSS_CHANGED_QOS)) {
ret = iwl_mvm_power_update_mac(mvm, vif);
@ -1437,8 +1515,6 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
struct cfg80211_scan_request *req)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_notification_wait wait_scan_done;
static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
int ret;
if (req->n_channels == 0 || req->n_channels > MAX_NUM_SCAN_CHANNELS)
@ -1448,22 +1524,11 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
switch (mvm->scan_status) {
case IWL_MVM_SCAN_SCHED:
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
scan_done_notif,
ARRAY_SIZE(scan_done_notif),
NULL, NULL);
iwl_mvm_sched_scan_stop(mvm);
ret = iwl_wait_notification(&mvm->notif_wait,
&wait_scan_done, HZ);
ret = iwl_mvm_sched_scan_stop(mvm);
if (ret) {
ret = -EBUSY;
goto out;
}
/* iwl_mvm_rx_scan_offload_complete_notif() will be called
* soon but will not reset the scan status as it won't be
* IWL_MVM_SCAN_SCHED any more since we queue the next scan
* immediately (below)
*/
break;
case IWL_MVM_SCAN_NONE:
break;
@ -1479,7 +1544,8 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
out:
mutex_unlock(&mvm->mutex);
/* make sure to flush the Rx handler before the next scan arrives */
iwl_mvm_wait_for_async_handlers(mvm);
return ret;
}
@ -1641,7 +1707,9 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
/* enable beacon filtering */
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
if (vif->bss_conf.dtim_period)
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif,
CMD_SYNC));
ret = 0;
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
@ -1738,9 +1806,26 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
IWL_DEBUG_SCAN(mvm,
"SCHED SCAN request during internal scan - abort\n");
switch (mvm->scan_status) {
case IWL_MVM_SCAN_OS:
IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
ret = iwl_mvm_cancel_scan(mvm);
if (ret) {
ret = -EBUSY;
goto out;
}
/*
* iwl_mvm_rx_scan_complete() will be called soon but will
* not reset the scan status as it won't be IWL_MVM_SCAN_OS
* any more since we queue the next scan immediately (below).
* We make sure it is called before the next scan starts by
* flushing the async-handlers work.
*/
break;
case IWL_MVM_SCAN_NONE:
break;
default:
ret = -EBUSY;
goto out;
}
@ -1762,6 +1847,8 @@ err:
mvm->scan_status = IWL_MVM_SCAN_NONE;
out:
mutex_unlock(&mvm->mutex);
/* make sure to flush the Rx handler before the next scan arrives */
iwl_mvm_wait_for_async_handlers(mvm);
return ret;
}
@ -1769,12 +1856,14 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
mutex_lock(&mvm->mutex);
iwl_mvm_sched_scan_stop(mvm);
ret = iwl_mvm_sched_scan_stop(mvm);
mutex_unlock(&mvm->mutex);
iwl_mvm_wait_for_async_handlers(mvm);
return 0;
return ret;
}
static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,


@ -230,6 +230,8 @@ enum iwl_mvm_ref_type {
IWL_MVM_REF_P2P_CLIENT,
IWL_MVM_REF_AP_IBSS,
IWL_MVM_REF_USER,
IWL_MVM_REF_TX,
IWL_MVM_REF_TX_AGG,
IWL_MVM_REF_COUNT,
};
@ -317,13 +319,13 @@ struct iwl_mvm_vif {
bool seqno_valid;
u16 seqno;
#endif
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 addresses for WoWLAN */
struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
int num_target_ipv6_addrs;
#endif
#endif
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct iwl_mvm *mvm;
@ -346,6 +348,8 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
return (void *)vif->drv_priv;
}
extern const u8 tid_to_mac80211_ac[];
enum iwl_scan_status {
IWL_MVM_SCAN_NONE,
IWL_MVM_SCAN_OS,
@ -571,6 +575,9 @@ struct iwl_mvm {
/* -1 for always, 0 for never, >0 for that many times */
s8 restart_fw;
void *fw_error_dump;
void *fw_error_sram;
u32 fw_error_sram_len;
struct led_classdev led;
@ -591,12 +598,20 @@ struct iwl_mvm {
/* d0i3 */
u8 d0i3_ap_sta_id;
bool d0i3_offloading;
struct work_struct d0i3_exit_work;
struct sk_buff_head d0i3_tx;
/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
spinlock_t d0i3_tx_lock;
wait_queue_head_t d0i3_exit_waitq;
/* BT-Coex */
u8 bt_kill_msk;
struct iwl_bt_coex_profile_notif last_bt_notif;
struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
u32 last_ant_isol;
u8 last_corun_lut;
u8 bt_tx_prio;
/* Thermal Throttling and CTkill */
struct iwl_mvm_tt_mgmt thermal_throttle;
@ -630,6 +645,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_HW_CTKILL,
IWL_MVM_STATUS_ROC_RUNNING,
IWL_MVM_STATUS_IN_HW_RESTART,
IWL_MVM_STATUS_IN_D0I3,
};
static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
@ -656,6 +672,12 @@ iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
return iwl_mvm_sta_from_mac80211(sta);
}
static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
{
return mvm->trans->cfg->d0i3 &&
(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}
extern const u8 iwl_mvm_ac_to_tx_fifo[];
struct iwl_rate_info {
@ -680,7 +702,10 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
struct ieee80211_tx_rate *r);
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
void iwl_mvm_dump_sram(struct iwl_mvm *mvm);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
#endif
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
@ -706,6 +731,11 @@ static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
{
flush_work(&mvm->async_handlers_wk);
}
/* Statistics */
int iwl_mvm_rx_reply_statistics(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
@ -739,6 +769,9 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
@ -793,7 +826,7 @@ int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
/* Scheduled scan */
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
@ -807,7 +840,7 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
@ -878,10 +911,17 @@ iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
}
#endif
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
struct iwl_wowlan_config_cmd_v2 *cmd);
int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool disable_offloading,
u32 cmd_flags);
/* D0i3 */
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
/* BT Coex */
int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
@ -892,10 +932,12 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event rssi_event);
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
enum iwl_bt_kill_msk {
@ -942,6 +984,8 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* Low latency */
int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool value);
/* get SystemLowLatencyMode - only needed for beacon threshold? */
bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
/* get VMACLowLatencyMode */
static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
{


@ -0,0 +1,215 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <net/ipv6.h>
#include <net/addrconf.h>
#include "mvm.h"
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
struct iwl_wowlan_config_cmd_v2 *cmd)
{
int i;
/*
* For QoS counters, we store the one to use next, so subtract 0x10
* since the uCode will add 0x10 *before* using the value while we
* increment after using the value (i.e. store the next value to use).
*/
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
u16 seq = mvm_ap_sta->tid_data[i].seq_number;
seq -= 0x10;
cmd->qos_seq[i] = cpu_to_le16(seq);
}
}
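
The subtraction above pairs with the addition on D0i3 exit (see
iwl_mvm_d0i3_enable_tx later in this diff): the firmware tracks the
*last used* sequence number and adds 0x10 before use, while the driver
stores the *next* one to use, and QoS seqnos step by 0x10 because the
low four bits of the sequence control field hold the fragment number.
A standalone sketch of that round trip - illustration only, with
made-up values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t driver_next = 0x120;	/* driver stores next-to-use */

	/* hand off: firmware adds 0x10 *before* use, so pass last-used */
	uint16_t to_fw = driver_next - 0x10;

	/* firmware transmits two frames, using 0x120 and then 0x130 */
	uint16_t fw_last = to_fw + 2 * 0x10;

	/* resume: firmware reports last-used, driver stores next-to-use */
	driver_next = fw_last + 0x10;
	assert(driver_next == 0x140);
	return 0;
}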
int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool disable_offloading,
u32 cmd_flags)
{
union {
struct iwl_proto_offload_cmd_v1 v1;
struct iwl_proto_offload_cmd_v2 v2;
struct iwl_proto_offload_cmd_v3_small v3s;
struct iwl_proto_offload_cmd_v3_large v3l;
} cmd = {};
struct iwl_host_cmd hcmd = {
.id = PROT_OFFLOAD_CONFIG_CMD,
.flags = cmd_flags,
.data[0] = &cmd,
.dataflags[0] = IWL_HCMD_DFL_DUP,
};
struct iwl_proto_offload_cmd_common *common;
u32 enabled = 0, size;
u32 capa_flags = mvm->fw->ucode_capa.flags;
#if IS_ENABLED(CONFIG_IPV6)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int i;
if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
struct iwl_ns_config *nsc;
struct iwl_targ_addr *addrs;
int n_nsc, n_addrs;
int c;
if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
nsc = cmd.v3s.ns_config;
n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
addrs = cmd.v3s.targ_addrs;
n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
} else {
nsc = cmd.v3l.ns_config;
n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
addrs = cmd.v3l.targ_addrs;
n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
}
if (mvmvif->num_target_ipv6_addrs)
enabled |= IWL_D3_PROTO_OFFLOAD_NS;
/*
* For each address we have (and that will fit), fill a target
* address struct and map it to the NS offload config that shares
* its solicited node address.
*/
for (i = 0, c = 0;
i < mvmvif->num_target_ipv6_addrs &&
i < n_addrs && c < n_nsc; i++) {
struct in6_addr solicited_addr;
int j;
addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
&solicited_addr);
for (j = 0; j < c; j++)
if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
&solicited_addr) == 0)
break;
if (j == c)
c++;
addrs[i].addr = mvmvif->target_ipv6_addrs[i];
addrs[i].config_num = cpu_to_le32(j);
nsc[j].dest_ipv6_addr = solicited_addr;
memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
}
if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
else
cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
if (mvmvif->num_target_ipv6_addrs) {
enabled |= IWL_D3_PROTO_OFFLOAD_NS;
memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
}
BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
sizeof(mvmvif->target_ipv6_addrs[0]));
for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
memcpy(cmd.v2.target_ipv6_addr[i],
&mvmvif->target_ipv6_addrs[i],
sizeof(cmd.v2.target_ipv6_addr[i]));
} else {
if (mvmvif->num_target_ipv6_addrs) {
enabled |= IWL_D3_PROTO_OFFLOAD_NS;
memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
}
BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
sizeof(mvmvif->target_ipv6_addrs[0]));
for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
memcpy(cmd.v1.target_ipv6_addr[i],
&mvmvif->target_ipv6_addrs[i],
sizeof(cmd.v1.target_ipv6_addr[i]));
}
#endif
if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
common = &cmd.v3s.common;
size = sizeof(cmd.v3s);
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
common = &cmd.v3l.common;
size = sizeof(cmd.v3l);
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
common = &cmd.v2.common;
size = sizeof(cmd.v2);
} else {
common = &cmd.v1.common;
size = sizeof(cmd.v1);
}
if (vif->bss_conf.arp_addr_cnt) {
enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
}
if (!disable_offloading)
common->enabled = cpu_to_le32(enabled);
hcmd.len[0] = size;
return iwl_mvm_send_cmd(mvm, &hcmd);
}


@ -61,6 +61,7 @@
*
*****************************************************************************/
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <net/mac80211.h>
#include "iwl-notif-wait.h"
@ -78,6 +79,7 @@
#include "iwl-prph.h"
#include "rs.h"
#include "fw-api-scan.h"
#include "fw-error-dump.h"
#include "time-event.h"
/*
@ -220,13 +222,15 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false),
RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
iwl_mvm_rx_ant_coupling_notif, true),
RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, true),
RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
iwl_mvm_rx_scan_offload_complete_notif, true),
RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_sched_scan_results,
@ -321,6 +325,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(MAC_PM_POWER_TABLE),
CMD(BT_COEX_CI),
CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
CMD(ANTENNA_COUPLING_NOTIFICATION),
};
#undef CMD
@ -407,6 +412,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
spin_lock_init(&mvm->d0i3_tx_lock);
skb_queue_head_init(&mvm->d0i3_tx);
init_waitqueue_head(&mvm->d0i3_exit_waitq);
SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
/*
@ -527,6 +536,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
ieee80211_unregister_hw(mvm->hw);
kfree(mvm->scan_cmd);
vfree(mvm->fw_error_dump);
kfree(mvm->fw_error_sram);
kfree(mvm->mcast_filter_cmd);
mvm->mcast_filter_cmd = NULL;
@ -690,7 +701,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
}
static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
@ -699,9 +710,9 @@ static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
else
clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
if (state && mvm->cur_ucode != IWL_UCODE_INIT)
iwl_trans_stop_device(mvm->trans);
wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
return state && mvm->cur_ucode != IWL_UCODE_INIT;
}
static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
@ -797,13 +808,52 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
}
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
{
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
u32 file_len;
lockdep_assert_held(&mvm->mutex);
if (mvm->fw_error_dump)
return;
file_len = mvm->fw_error_sram_len +
sizeof(*dump_file) +
sizeof(*dump_data);
dump_file = vmalloc(file_len);
if (!dump_file)
return;
mvm->fw_error_dump = dump_file;
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
dump_file->file_len = cpu_to_le32(file_len);
dump_data = (void *)dump_file->data;
dump_data->type = IWL_FW_ERROR_DUMP_SRAM;
dump_data->len = cpu_to_le32(mvm->fw_error_sram_len);
/*
* No need for a lock since at this stage the FW isn't loaded, so it
* can't assert - we are the only one who can possibly be accessing
* mvm->fw_error_sram right now.
*/
memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len);
}
#endif
static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
iwl_mvm_dump_nic_error_log(mvm);
if (!mvm->restart_fw)
iwl_mvm_dump_sram(mvm);
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_fw_error_sram_dump(mvm);
#endif
iwl_mvm_nic_restart(mvm);
}
@ -820,8 +870,62 @@ struct iwl_d0i3_iter_data {
struct iwl_mvm *mvm;
u8 ap_sta_id;
u8 vif_count;
u8 offloading_tid;
bool disable_offloading;
};
static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_d0i3_iter_data *iter_data)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvmsta;
u32 available_tids = 0;
u8 tid;
if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
return false;
ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
if (IS_ERR_OR_NULL(ap_sta))
return false;
mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
spin_lock_bh(&mvmsta->lock);
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
/*
* in case of pending tx packets, don't use this tid
* for offloading in order to prevent reuse of the same
* qos seq counters.
*/
if (iwl_mvm_tid_queued(tid_data))
continue;
if (tid_data->state != IWL_AGG_OFF)
continue;
available_tids |= BIT(tid);
}
spin_unlock_bh(&mvmsta->lock);
/*
* disallow protocol offloading if we have no available tid
* (with no pending frames and no active aggregation,
* as we don't handle "holes" properly - the scheduler needs the
* frame's seq number and TFD index to match)
*/
if (!available_tids)
return true;
/* for simplicity, just use the first available tid */
iter_data->offloading_tid = ffs(available_tids) - 1;
return false;
}
static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
@ -835,7 +939,16 @@ static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
!vif->bss_conf.assoc)
return;
/*
* in case of pending tx packets or active aggregations,
* avoid offloading features in order to prevent reuse of
* the same qos seq counters.
*/
if (iwl_mvm_disallow_offloading(mvm, vif, data))
data->disable_offloading = true;
iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading, flags);
/*
* on init/association, mvm already configures POWER_TABLE_CMD
@ -847,6 +960,34 @@ static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
data->vif_count++;
}
static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
struct iwl_wowlan_config_cmd_v3 *cmd,
struct iwl_d0i3_iter_data *iter_data)
{
struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvm_ap_sta;
if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT)
return;
rcu_read_lock();
ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
if (IS_ERR_OR_NULL(ap_sta))
goto out;
mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
cmd->common.is_11n_connection = ap_sta->ht_cap.ht_supported;
cmd->offloading_tid = iter_data->offloading_tid;
/*
* The d0i3 uCode takes care of the nonqos counters,
* so configure only the qos seq ones.
*/
iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &cmd->common);
out:
rcu_read_unlock();
}
static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
@ -855,11 +996,14 @@ static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
struct iwl_d0i3_iter_data d0i3_iter_data = {
.mvm = mvm,
};
struct iwl_wowlan_config_cmd wowlan_config_cmd = {
.wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
IWL_WOWLAN_WAKEUP_BEACON_MISS |
IWL_WOWLAN_WAKEUP_LINK_CHANGE |
IWL_WOWLAN_WAKEUP_BCN_FILTERING),
struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {
.common = {
.wakeup_filter =
cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
IWL_WOWLAN_WAKEUP_BEACON_MISS |
IWL_WOWLAN_WAKEUP_LINK_CHANGE |
IWL_WOWLAN_WAKEUP_BCN_FILTERING),
},
};
struct iwl_d3_manager_config d3_cfg_cmd = {
.min_sleep_time = cpu_to_le32(1000),
@ -867,17 +1011,24 @@ static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
/* make sure we have no running tx while configuring the qos */
set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
synchronize_net();
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_enter_d0i3_iterator,
&d0i3_iter_data);
if (d0i3_iter_data.vif_count == 1) {
mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
} else {
WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvm->d0i3_offloading = false;
}
iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
sizeof(wowlan_config_cmd),
&wowlan_config_cmd);
@ -914,6 +1065,62 @@ static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
ieee80211_connection_loss(vif);
}
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
{
struct ieee80211_sta *sta = NULL;
struct iwl_mvm_sta *mvm_ap_sta;
int i;
bool wake_queues = false;
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->d0i3_tx_lock);
if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT)
goto out;
IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
/* get the sta in order to update seq numbers and re-enqueue skbs */
sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta)) {
sta = NULL;
goto out;
}
if (mvm->d0i3_offloading && qos_seq) {
/* update qos seq numbers if offloading was enabled */
mvm_ap_sta = (struct iwl_mvm_sta *)sta->drv_priv;
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
u16 seq = le16_to_cpu(qos_seq[i]);
/* firmware stores last-used one, we store next one */
seq += 0x10;
mvm_ap_sta->tid_data[i].seq_number = seq;
}
}
out:
/* re-enqueue (or drop) all packets */
while (!skb_queue_empty(&mvm->d0i3_tx)) {
struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);
if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
ieee80211_free_txskb(mvm->hw, skb);
/* if the skb_queue is not empty, we need to wake queues */
wake_queues = true;
}
clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
wake_up(&mvm->d0i3_exit_waitq);
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
if (wake_queues)
ieee80211_wake_queues(mvm->hw);
spin_unlock_bh(&mvm->d0i3_tx_lock);
}
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
{
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
@ -924,6 +1131,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
struct iwl_wowlan_status_v6 *status;
int ret;
u32 disconnection_reasons, wakeup_reasons;
__le16 *qos_seq = NULL;
mutex_lock(&mvm->mutex);
ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
@ -935,6 +1143,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
status = (void *)get_status_cmd.resp_pkt->data;
wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
qos_seq = status->qos_seq_ctr;
IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
@ -948,6 +1157,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
iwl_free_resp(&get_status_cmd);
out:
iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
mutex_unlock(&mvm->mutex);
}


@ -511,6 +511,7 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
struct iwl_power_constraint {
struct ieee80211_vif *bf_vif;
struct ieee80211_vif *bss_vif;
struct ieee80211_vif *p2p_vif;
u16 bss_phyctx_id;
u16 p2p_phyctx_id;
bool pm_disabled;
@ -546,6 +547,10 @@ static void iwl_mvm_power_iterator(void *_data, u8 *mac,
if (mvmvif->phy_ctxt)
power_iterator->p2p_phyctx_id = mvmvif->phy_ctxt->id;
/* we should have only one P2P vif */
WARN_ON(power_iterator->p2p_vif);
power_iterator->p2p_vif = vif;
IWL_DEBUG_POWER(mvm, "p2p: p2p_id=%d, bss_id=%d\n",
power_iterator->p2p_phyctx_id,
power_iterator->bss_phyctx_id);
@ -633,16 +638,18 @@ int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
}
ret = iwl_mvm_power_send_cmd(mvm, vif);
if (ret)
return ret;
if (constraint.bss_vif && vif != constraint.bss_vif) {
if (constraint.bss_vif) {
ret = iwl_mvm_power_send_cmd(mvm, constraint.bss_vif);
if (ret)
return ret;
}
if (constraint.p2p_vif) {
ret = iwl_mvm_power_send_cmd(mvm, constraint.p2p_vif);
if (ret)
return ret;
}
if (!constraint.bf_vif)
return 0;


@ -180,7 +180,6 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
.colors = { -1, -1, -1, -1 },
.new_vif = newvif,
};
u32 ll_max_duration;
lockdep_assert_held(&mvm->mutex);
@ -199,21 +198,6 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
iwl_mvm_quota_iterator(&data, newvif->addr, newvif);
}
switch (data.n_low_latency_bindings) {
case 0: /* no low latency - use default */
ll_max_duration = 0;
break;
case 1: /* SingleBindingLowLatencyMode */
ll_max_duration = IWL_MVM_LOWLAT_SINGLE_BINDING_MAXDUR;
break;
case 2: /* DualBindingLowLatencyMode */
ll_max_duration = IWL_MVM_LOWLAT_DUAL_BINDING_MAXDUR;
break;
default: /* MultiBindingLowLatencyMode */
ll_max_duration = 0;
break;
}
/*
* The FW's scheduling session consists of
* IWL_MVM_MAX_QUOTA fragments. Divide these fragments
@ -278,7 +262,6 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
* binding.
*/
cmd.quotas[idx].quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
else
cmd.quotas[idx].quota =
cpu_to_le32(quota * data.n_interfaces[i]);
@ -287,11 +270,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
"Binding=%d, quota=%u > max=%u\n",
idx, le32_to_cpu(cmd.quotas[idx].quota), QUOTA_100);
if (data.n_interfaces[i] && !data.low_latency[i])
cmd.quotas[idx].max_duration =
cpu_to_le32(ll_max_duration);
else
cmd.quotas[idx].max_duration = cpu_to_le32(0);
cmd.quotas[idx].max_duration = cpu_to_le32(0);
idx++;
}
