Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (60 commits)
  [NIU]: Bump driver version and release date.
  [NIU]: Fix BMAC alternate MAC address indexing.
  net: fix kernel-doc warnings in header files
  [IPV6]: Use BUG_ON instead of if + BUG in fib6_del_route.
  [IPV6]: dst_entry leak in ip4ip6_err. (resend)
  bluetooth: do not move child device other than rfcomm
  bluetooth: put hci dev after del conn
  [NET]: Elminate spurious print_mac() calls.
  [BLUETOOTH] hci_sysfs.c: Kill build warning.
  [NET]: Remove MAC_FMT
  net/8021q/vlan_dev.c: Use print_mac.
  [XFRM]: Fix ordering issue in xfrm_dst_hash_transfer().
  [BLUETOOTH] net/bluetooth/hci_core.c: Use time_* macros
  [IPV6]: Fix hardcoded removing of old module code
  [NETLABEL]: Move some initialization code into __init section.
  [NETLABEL]: Shrink the genl-ops registration code.
  [AX25] ax25_out: check skb for NULL in ax25_kick()
  [TCP]: Fix tcp_v4_send_synack() comment
  [IPV4]: fix alignment of IP-Config output
  Documentation: fix tcp.txt
  ...
Linus Torvalds 2008-02-19 07:52:45 -08:00
commit 07ce198a1e
54 changed files with 574 additions and 476 deletions

View File

@ -1,7 +1,7 @@
TCP protocol
============
Last updated: 21 June 2005
Last updated: 9 February 2008
Contents
========
@ -52,9 +52,9 @@ research and RFC's before developing new modules.
The method that is used to determine which congestion control mechanism is
determined by the setting of the sysctl net.ipv4.tcp_congestion_control.
The default congestion control will be the last one registered (LIFO);
so if you built everything as modules. the default will be reno. If you
build with the default's from Kconfig, then BIC will be builtin (not a module)
and it will end up the default.
so if you built everything as modules, the default will be reno. If you
build with the defaults from Kconfig, then CUBIC will be builtin (not a
module) and it will end up the default.
If you really want a particular default value then you will need
to set it with the sysctl. If you use a sysctl, the module will be autoloaded
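
As a quick illustration of the sysctl described above, here is a minimal user-space sketch (hypothetical, not part of the patch) that selects reno through the procfs view of the sysctl; it is equivalent to "sysctl -w net.ipv4.tcp_congestion_control=reno":

#include <stdio.h>

/* Hypothetical helper: pick the default TCP congestion control by
 * writing its name to the sysctl. If the chosen algorithm is built
 * as a module, the kernel autoloads it, as the text above notes. */
int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_congestion_control", "w");

	if (!f) {
		perror("tcp_congestion_control");
		return 1;
	}
	fputs("reno", f);
	fclose(f);
	return 0;
}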

View File

@ -50,7 +50,7 @@ struct e1000_stats {
int stat_offset;
};
#define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \
#define E1000_STAT(m) FIELD_SIZEOF(struct e1000_adapter, m), \
offsetof(struct e1000_adapter, m)
static const struct e1000_stats e1000_gstrings_stats[] = {
{ "rx_packets", E1000_STAT(stats.gprc) },

View File

@ -1195,6 +1195,14 @@ e1000_probe(struct pci_dev *pdev,
printk("%s\n", print_mac(mac, netdev->dev_addr));
if (adapter->hw.bus_type == e1000_bus_type_pci_express) {
DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
"longer be supported by this driver in the future.\n",
pdev->vendor, pdev->device);
DPRINTK(PROBE, WARNING, "please use the \"e1000e\" "
"driver instead.\n");
}
/* reset the hardware with the new settings */
e1000_reset(adapter);

View File

@ -1055,23 +1055,6 @@ static void e1000_release_hw_control(struct e1000_adapter *adapter)
}
}
static void e1000_release_manageability(struct e1000_adapter *adapter)
{
if (adapter->flags & FLAG_MNG_PT_ENABLED) {
struct e1000_hw *hw = &adapter->hw;
u32 manc = er32(MANC);
/* re-enable hardware interception of ARP */
manc |= E1000_MANC_ARP_EN;
manc &= ~E1000_MANC_EN_MNG2HOST;
/* don't explicitly have to mess with MANC2H since
* MANC has an enable disable that gates MANC2H */
ew32(MANC, manc);
}
}
/**
* @e1000_alloc_ring - allocate memory for a ring structure
**/
@ -1561,9 +1544,6 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
manc = er32(MANC);
/* disable hardware interception of ARP */
manc &= ~(E1000_MANC_ARP_EN);
/* enable receiving management packets to the host. this will probably
* generate destination unreachable messages from the host OS, but
* the packets will be handled on SMBUS */
@ -1690,6 +1670,9 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
else
rctl |= E1000_RCTL_LPE;
/* Enable hardware CRC frame stripping */
rctl |= E1000_RCTL_SECRC;
/* Setup buffer sizes */
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
@ -1755,9 +1738,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
/* Enable Packet split descriptors */
rctl |= E1000_RCTL_DTYP_PS;
/* Enable hardware CRC frame stripping */
rctl |= E1000_RCTL_SECRC;
psrctl |= adapter->rx_ps_bsize0 >>
E1000_PSRCTL_BSIZE0_SHIFT;
@ -2008,7 +1988,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
u16 mii_reg;
/* WoL is enabled */
if (!adapter->wol)
if (adapter->wol)
return;
/* non-copper PHY? */
@ -2140,8 +2120,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
phy_data &= ~IGP02E1000_PM_SPD;
e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
}
e1000_release_manageability(adapter);
}
int e1000e_up(struct e1000_adapter *adapter)
@ -3487,8 +3465,6 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
pci_enable_wake(pdev, PCI_D3cold, 0);
}
e1000_release_manageability(adapter);
/* make sure adapter isn't asleep if manageability is enabled */
if (adapter->flags & FLAG_MNG_PT_ENABLED) {
pci_enable_wake(pdev, PCI_D3hot, 1);
@ -4054,8 +4030,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
flush_scheduled_work();
e1000_release_manageability(adapter);
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant. */
e1000_release_hw_control(adapter);

View File

@ -130,8 +130,8 @@ static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
extern int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id, int regnum, u16 value);
extern int gfar_local_mdio_read(struct gfar_mii *regs, int mii_id, int regnum);
extern int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, int regnum, u16 value);
extern int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct napi_struct *napi, int budget);
#endif
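
The __iomem annotations added above are for sparse (make C=1): they mark pointers into device register space so that direct dereferences are flagged and only the MMIO accessors get used. A rough sketch of what the attribute expands to, simplified from <linux/compiler.h>:

/* Simplified sketch, assuming the usual sparse setup: under the
 * checker, __iomem places the pointer in a separate address space,
 * so mixing it with normal pointers is reported. */
#ifdef __CHECKER__
# define __iomem	__attribute__((noderef, address_space(2)))
#else
# define __iomem
#endif

struct gfar_mii;	/* MMIO register block; layout not shown here */

static inline void demo(struct gfar_mii __iomem *regs)
{
	/* correct style: ioread32()/iowrite32(), never *regs directly */
	(void)regs;
}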

View File

@ -51,7 +51,7 @@
* the local mdio pins, which may not be the same as system mdio bus, used for
* controlling the external PHYs, for example.
*/
int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id,
int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id,
int regnum, u16 value)
{
/* Set the PHY address and the register address we want to write */
@ -77,7 +77,7 @@ int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id,
* and are always tied to the local mdio pins, which may not be the
* same as system mdio bus, used for controlling the external PHYs, for eg.
*/
int gfar_local_mdio_read(struct gfar_mii *regs, int mii_id, int regnum)
int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum)
{
u16 value;

View File

@ -289,7 +289,6 @@ static void ax_bump(struct mkiss *ax)
*ax->rbuff &= ~0x20;
}
}
spin_unlock_bh(&ax->buflock);
count = ax->rcount;
@ -297,17 +296,17 @@ static void ax_bump(struct mkiss *ax)
printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n",
ax->dev->name);
ax->stats.rx_dropped++;
spin_unlock_bh(&ax->buflock);
return;
}
spin_lock_bh(&ax->buflock);
memcpy(skb_put(skb,count), ax->rbuff, count);
spin_unlock_bh(&ax->buflock);
skb->protocol = ax25_type_trans(skb, ax->dev);
netif_rx(skb);
ax->dev->last_rx = jiffies;
ax->stats.rx_packets++;
ax->stats.rx_bytes += count;
spin_unlock_bh(&ax->buflock);
}
static void kiss_unesc(struct mkiss *ax, unsigned char s)

View File

@ -43,7 +43,7 @@ struct igb_stats {
int stat_offset;
};
#define IGB_STAT(m) sizeof(((struct igb_adapter *)0)->m), \
#define IGB_STAT(m) FIELD_SIZEOF(struct igb_adapter, m), \
offsetof(struct igb_adapter, m)
static const struct igb_stats igb_gstrings_stats[] = {
{ "rx_packets", IGB_STAT(stats.gprc) },

View File

@ -606,9 +606,6 @@ static void igb_init_manageability(struct igb_adapter *adapter)
u32 manc2h = rd32(E1000_MANC2H);
u32 manc = rd32(E1000_MANC);
/* disable hardware interception of ARP */
manc &= ~(E1000_MANC_ARP_EN);
/* enable receiving management packets to the host */
/* this will probably generate destination unreachable messages
* from the host OS, but the packets will be handled on SMBUS */
@ -623,25 +620,6 @@ static void igb_init_manageability(struct igb_adapter *adapter)
}
}
static void igb_release_manageability(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
if (adapter->en_mng_pt) {
u32 manc = rd32(E1000_MANC);
/* re-enable hardware interception of ARP */
manc |= E1000_MANC_ARP_EN;
manc &= ~E1000_MANC_EN_MNG2HOST;
/* don't explicitly have to mess with MANC2H since
* MANC has an enable disable that gates MANC2H */
/* XXX stop the hardware watchdog ? */
wr32(E1000_MANC, manc);
}
}
/**
* igb_configure - configure the hardware for RX and TX
* @adapter: private board structure
@ -844,7 +822,6 @@ void igb_reset(struct igb_adapter *adapter)
igb_reset_adaptive(&adapter->hw);
adapter->hw.phy.ops.get_phy_info(&adapter->hw);
igb_release_manageability(adapter);
}
/**
@ -1178,9 +1155,6 @@ static void __devexit igb_remove(struct pci_dev *pdev)
flush_scheduled_work();
igb_release_manageability(adapter);
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant. */
igb_release_hw_control(adapter);
@ -3955,8 +3929,6 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
pci_enable_wake(pdev, PCI_D3cold, 0);
}
igb_release_manageability(adapter);
/* make sure adapter isn't asleep if manageability is enabled */
if (adapter->en_mng_pt) {
pci_enable_wake(pdev, PCI_D3hot, 1);

View File

@ -49,7 +49,7 @@ struct ixgb_stats {
int stat_offset;
};
#define IXGB_STAT(m) sizeof(((struct ixgb_adapter *)0)->m), \
#define IXGB_STAT(m) FIELD_SIZEOF(struct ixgb_adapter, m), \
offsetof(struct ixgb_adapter, m)
static struct ixgb_stats ixgb_gstrings_stats[] = {
{"rx_packets", IXGB_STAT(net_stats.rx_packets)},

View File

@ -220,7 +220,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
tx_ring->stats.bytes += tx_buffer_info->length;
if (cleaned) {
struct sk_buff *skb = tx_buffer_info->skb;
#ifdef NETIF_F_TSO
unsigned int segs, bytecount;
segs = skb_shinfo(skb)->gso_segs ?: 1;
/* multiply data chunks by size of headers */
@ -228,10 +227,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
skb->len;
total_tx_packets += segs;
total_tx_bytes += bytecount;
#else
total_tx_packets++;
total_tx_bytes += skb->len;
#endif
}
ixgbe_unmap_and_free_tx_resource(adapter,
tx_buffer_info);
@ -1942,6 +1937,10 @@ static int ixgbe_open(struct net_device *netdev)
int err;
u32 num_rx_queues = adapter->num_rx_queues;
/* disallow open during test */
if (test_bit(__IXGBE_TESTING, &adapter->state))
return -EBUSY;
try_intr_reinit:
/* allocate transmit descriptors */
err = ixgbe_setup_all_tx_resources(adapter);
@ -2278,11 +2277,29 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
IXGBE_ADVTXD_DTYP_CTXT);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (skb->protocol == htons(ETH_P_IP))
switch (skb->protocol) {
case __constant_htons(ETH_P_IP):
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
type_tucmd_mlhl |=
IXGBE_ADVTXD_TUCMD_L4T_TCP;
break;
if (skb->sk->sk_protocol == IPPROTO_TCP)
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
case __constant_htons(ETH_P_IPV6):
/* XXX what about other V6 headers?? */
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
type_tucmd_mlhl |=
IXGBE_ADVTXD_TUCMD_L4T_TCP;
break;
default:
if (unlikely(net_ratelimit())) {
DPRINTK(PROBE, WARNING,
"partial checksum but proto=%x!\n",
skb->protocol);
}
break;
}
}
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
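
One detail worth noting in the ixgbe_tx_csum() rework above: switch labels must be integer constant expressions, which is why the cases use __constant_htons() rather than htons(). A self-contained sketch of the pattern; the macro below assumes a little-endian host and is for illustration only:

#include <stdio.h>

/* Illustration-only byte swap; the kernel's __constant_htons() folds
 * at compile time, which is what makes it legal in a case label. */
#define __constant_htons(x) \
	((unsigned short)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))

#define ETH_P_IP	0x0800
#define ETH_P_IPV6	0x86DD

static const char *l3_name(unsigned short proto)	/* network order */
{
	switch (proto) {
	case __constant_htons(ETH_P_IP):
		return "IPv4";
	case __constant_htons(ETH_P_IPV6):
		return "IPv6";
	default:
		return "other";
	}
}

int main(void)
{
	printf("%s\n", l3_name(__constant_htons(ETH_P_IPV6)));
	return 0;
}
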
@ -2778,6 +2795,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
hw->mac.type, hw->phy.type,
(part_num >> 8), (part_num & 0xff));
if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
"this card is not sufficient for optimal "
"performance.\n");
dev_warn(&pdev->dev, "For optimal performance a x8 "
"PCI-Express slot is required.\n");
}
/* reset the hardware with the new settings */
ixgbe_start_hw(hw);

View File

@ -33,8 +33,8 @@
#define DRV_MODULE_NAME "niu"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "0.6"
#define DRV_MODULE_RELDATE "January 5, 2008"
#define DRV_MODULE_VERSION "0.7"
#define DRV_MODULE_RELDATE "February 18, 2008"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@ -5147,7 +5147,12 @@ static void niu_set_rx_mode(struct net_device *dev)
index++;
}
} else {
for (i = 0; i < niu_num_alt_addr(np); i++) {
int alt_start;
if (np->flags & NIU_FLAGS_XMAC)
alt_start = 0;
else
alt_start = 1;
for (i = alt_start; i < niu_num_alt_addr(np); i++) {
err = niu_enable_alt_mac(np, i, 0);
if (err)
printk(KERN_WARNING PFX "%s: Error %d "

View File

@ -559,8 +559,16 @@ static int mhz_setup(struct pcmcia_device *link)
/* Read the station address from the CIS. It is stored as the last
(fourth) string in the Version 1 Version/ID tuple. */
if (link->prod_id[3]) {
station_addr = link->prod_id[3];
tuple->DesiredTuple = CISTPL_VERS_1;
if (first_tuple(link, tuple, parse) != CS_SUCCESS) {
rc = -1;
goto free_cfg_mem;
}
/* Ugh -- the EM1144 card has two VERS_1 tuples!?! */
if (next_tuple(link, tuple, parse) != CS_SUCCESS)
first_tuple(link, tuple, parse);
if (parse->version_1.ns > 3) {
station_addr = parse->version_1.str + parse->version_1.ofs[3];
if (cvt_ascii_address(dev, station_addr) == 0) {
rc = 0;
goto free_cfg_mem;

View File

@ -36,6 +36,7 @@
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@ -297,18 +298,11 @@ static void tsi108_check_phy(struct net_device *dev)
u32 speed;
unsigned long flags;
/* Do a dummy read, as for some reason the first read
* after a link becomes up returns link down, even if
* it's been a while since the link came up.
*/
spin_lock_irqsave(&phy_lock, flags);
if (!data->phy_ok)
goto out;
tsi108_read_mii(data, MII_BMSR);
duplex = mii_check_media(&data->mii_if, netif_msg_link(data), data->init_media);
data->init_media = 0;
@ -345,22 +339,21 @@ static void tsi108_check_phy(struct net_device *dev)
TSI_WRITE(TSI108_MAC_CFG2, mac_cfg2_reg);
TSI_WRITE(TSI108_EC_PORTCTRL, portctrl_reg);
if (data->link_up == 0) {
/* The manual says it can take 3-4 usecs for the speed change
* to take effect.
*/
udelay(5);
spin_lock(&data->txlock);
if (is_valid_ether_addr(dev->dev_addr) && data->txfree)
netif_wake_queue(dev);
data->link_up = 1;
spin_unlock(&data->txlock);
}
}
if (data->link_up == 0) {
/* The manual says it can take 3-4 usecs for the speed change
* to take effect.
*/
udelay(5);
spin_lock(&data->txlock);
if (is_valid_ether_addr(dev->dev_addr) && data->txfree)
netif_wake_queue(dev);
data->link_up = 1;
spin_unlock(&data->txlock);
}
} else {
if (data->link_up == 1) {
netif_stop_queue(dev);
@ -1274,12 +1267,11 @@ static void tsi108_init_phy(struct net_device *dev)
* PHY_STAT register before the link up status bit is set.
*/
data->link_up = 1;
data->link_up = 0;
while (!((phyval = tsi108_read_mii(data, MII_BMSR)) &
BMSR_LSTATUS)) {
if (i++ > (MII_READ_DELAY / 10)) {
data->link_up = 0;
break;
}
spin_unlock_irqrestore(&phy_lock, flags);
@ -1287,6 +1279,7 @@ static void tsi108_init_phy(struct net_device *dev)
spin_lock_irqsave(&phy_lock, flags);
}
data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if);
printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyval);
data->phy_ok = 1;
data->init_media = 1;
@ -1527,12 +1520,46 @@ static void tsi108_init_mac(struct net_device *dev)
TSI_WRITE(TSI108_EC_INTMASK, ~0);
}
static int tsi108_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct tsi108_prv_data *data = netdev_priv(dev);
unsigned long flags;
int rc;
spin_lock_irqsave(&data->txlock, flags);
rc = mii_ethtool_gset(&data->mii_if, cmd);
spin_unlock_irqrestore(&data->txlock, flags);
return rc;
}
static int tsi108_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct tsi108_prv_data *data = netdev_priv(dev);
unsigned long flags;
int rc;
spin_lock_irqsave(&data->txlock, flags);
rc = mii_ethtool_sset(&data->mii_if, cmd);
spin_unlock_irqrestore(&data->txlock, flags);
return rc;
}
static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct tsi108_prv_data *data = netdev_priv(dev);
if (!netif_running(dev))
return -EINVAL;
return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL);
}
static const struct ethtool_ops tsi108_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_settings = tsi108_get_settings,
.set_settings = tsi108_set_settings,
};
static int
tsi108_init_one(struct platform_device *pdev)
{
@ -1584,7 +1611,6 @@ tsi108_init_one(struct platform_device *pdev)
data->mii_if.phy_id = einfo->phy;
data->mii_if.phy_id_mask = 0x1f;
data->mii_if.reg_num_mask = 0x1f;
data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if);
data->phy = einfo->phy;
data->phy_type = einfo->phy_type;
@ -1598,6 +1624,7 @@ tsi108_init_one(struct platform_device *pdev)
dev->get_stats = tsi108_get_stats;
netif_napi_add(dev, &data->napi, tsi108_poll, 64);
dev->do_ioctl = tsi108_do_ioctl;
dev->ethtool_ops = &tsi108_ethtool_ops;
/* Apparently, the Linux networking code won't use scatter-gather
* if the hardware doesn't do checksums. However, it's faster
@ -1629,6 +1656,7 @@ tsi108_init_one(struct platform_device *pdev)
goto register_fail;
}
platform_set_drvdata(pdev, dev);
printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: %s\n",
dev->name, print_mac(mac, dev->dev_addr));
#ifdef DEBUG

View File

@ -1256,7 +1256,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
if (ctl->flags & IEEE80211_TXCTL_NO_ACK)
flags |= AR5K_TXDESC_NOACK;
pktlen = skb->len + FCS_LEN;
pktlen = skb->len;
if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) {
keyidx = ctl->key_idx;
@ -1952,7 +1952,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
}
ds->ds_data = bf->skbaddr;
ret = ah->ah_setup_tx_desc(ah, ds, skb->len + FCS_LEN,
ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
ieee80211_get_hdrlen_from_skb(skb),
AR5K_PKT_TYPE_BEACON, (ctl->power_level * 2), ctl->tx_rate, 1,
AR5K_TXKEYIX_INVALID, antenna, flags, 0, 0);

View File

@ -3506,7 +3506,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
{
u32 frame_type;
struct ath5k_hw_2w_tx_desc *tx_desc;
unsigned int buff_len;
unsigned int frame_len;
tx_desc = (struct ath5k_hw_2w_tx_desc *)&desc->ds_ctl0;
@ -3537,22 +3537,25 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
/* Setup control descriptor */
/* Verify and set frame length */
if (pkt_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
/* remove padding we might have added before */
frame_len = pkt_len - (hdr_len & 3) + FCS_LEN;
if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
return -EINVAL;
tx_desc->tx_control_0 = pkt_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN;
tx_desc->tx_control_0 = frame_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN;
/* Verify and set buffer length */
buff_len = pkt_len - FCS_LEN;
/* NB: beacon's BufLen must be a multiple of 4 bytes */
if(type == AR5K_PKT_TYPE_BEACON)
buff_len = roundup(buff_len, 4);
pkt_len = roundup(pkt_len, 4);
if (buff_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN)
if (pkt_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN)
return -EINVAL;
tx_desc->tx_control_1 = buff_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
tx_desc->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
/*
* Verify and set header length
@ -3634,7 +3637,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
{
struct ath5k_hw_4w_tx_desc *tx_desc;
struct ath5k_hw_tx_status *tx_status;
unsigned int buff_len;
unsigned int frame_len;
ATH5K_TRACE(ah->ah_sc);
tx_desc = (struct ath5k_hw_4w_tx_desc *)&desc->ds_ctl0;
@ -3669,22 +3672,25 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
/* Setup control descriptor */
/* Verify and set frame length */
if (pkt_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
/* remove padding we might have added before */
frame_len = pkt_len - (hdr_len & 3) + FCS_LEN;
if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
return -EINVAL;
tx_desc->tx_control_0 = pkt_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;
tx_desc->tx_control_0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;
/* Verify and set buffer length */
buff_len = pkt_len - FCS_LEN;
/* NB: beacon's BufLen must be a multiple of 4 bytes */
if(type == AR5K_PKT_TYPE_BEACON)
buff_len = roundup(buff_len, 4);
pkt_len = roundup(pkt_len, 4);
if (buff_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN)
if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN)
return -EINVAL;
tx_desc->tx_control_1 = buff_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;
tx_desc->tx_control_1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;
tx_desc->tx_control_0 |=
AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) |
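
A worked example may help with the two lengths the hunks above separate (numbers are illustrative, not from the patch):

/* Assume skb->len (pkt_len) = 102 bytes in the DMA buffer, hdr_len = 26,
 * FCS_LEN = 4. The driver padded the header by hdr_len & 3 = 2 bytes,
 * and the hardware appends the FCS itself:
 *
 *   frame_len = pkt_len - (hdr_len & 3) + FCS_LEN = 102 - 2 + 4 = 104
 *     -> programmed into CTL0 FRAME_LEN (length of the frame on air)
 *   buffer length = pkt_len = 102, rounded up to 104 for beacons
 *     -> programmed into CTL1 BUF_LEN (length of the DMA buffer)
 */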

View File

@ -14,6 +14,12 @@
#include "lo.h"
#include "phy.h"
/* The unique identifier of the firmware that's officially supported by
* this driver version. */
#define B43_SUPPORTED_FIRMWARE_ID "FW13"
#ifdef CONFIG_B43_DEBUG
# define B43_DEBUG 1
#else

View File

@ -58,6 +58,8 @@ MODULE_AUTHOR("Stefano Brivio");
MODULE_AUTHOR("Michael Buesch");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(B43_SUPPORTED_FIRMWARE_ID);
static int modparam_bad_frames_preempt;
module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444);
@ -1859,11 +1861,11 @@ static int b43_upload_microcode(struct b43_wldev *dev)
err = -EOPNOTSUPP;
goto error;
}
b43dbg(dev->wl, "Loading firmware version %u.%u "
"(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n",
fwrev, fwpatch,
(fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF,
(fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F);
b43info(dev->wl, "Loading firmware version %u.%u "
"(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n",
fwrev, fwpatch,
(fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF,
(fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F);
dev->fw.rev = fwrev;
dev->fw.patch = fwpatch;
@ -4200,6 +4202,33 @@ static struct ssb_driver b43_ssb_driver = {
.resume = b43_resume,
};
static void b43_print_driverinfo(void)
{
const char *feat_pci = "", *feat_pcmcia = "", *feat_nphy = "",
*feat_leds = "", *feat_rfkill = "";
#ifdef CONFIG_B43_PCI_AUTOSELECT
feat_pci = "P";
#endif
#ifdef CONFIG_B43_PCMCIA
feat_pcmcia = "M";
#endif
#ifdef CONFIG_B43_NPHY
feat_nphy = "N";
#endif
#ifdef CONFIG_B43_LEDS
feat_leds = "L";
#endif
#ifdef CONFIG_B43_RFKILL
feat_rfkill = "R";
#endif
printk(KERN_INFO "Broadcom 43xx driver loaded "
"[ Features: %s%s%s%s%s, Firmware-ID: "
B43_SUPPORTED_FIRMWARE_ID " ]\n",
feat_pci, feat_pcmcia, feat_nphy,
feat_leds, feat_rfkill);
}
static int __init b43_init(void)
{
int err;
@ -4211,6 +4240,7 @@ static int __init b43_init(void)
err = ssb_driver_register(&b43_ssb_driver);
if (err)
goto err_pcmcia_exit;
b43_print_driverinfo();
return err;

View File

@ -23,6 +23,10 @@
#include "phy.h"
/* The unique identifier of the firmware that's officially supported by this
* driver version. */
#define B43legacy_SUPPORTED_FIRMWARE_ID "FW10"
#define B43legacy_IRQWAIT_MAX_RETRIES 20
#define B43legacy_RX_MAX_SSI 60 /* best guess at max ssi */

View File

@ -354,7 +354,8 @@ return 0;
}
u16 b43legacy_dmacontroller_base(int dma64bit, int controller_idx)
static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
int controller_idx)
{
static const u16 map64[] = {
B43legacy_MMIO_DMA64_BASE0,
@ -373,7 +374,7 @@ u16 b43legacy_dmacontroller_base(int dma64bit, int controller_idx)
B43legacy_MMIO_DMA32_BASE5,
};
if (dma64bit) {
if (type == B43legacy_DMA_64BIT) {
B43legacy_WARN_ON(!(controller_idx >= 0 &&
controller_idx < ARRAY_SIZE(map64)));
return map64[controller_idx];
@ -480,8 +481,9 @@ static void free_ringmemory(struct b43legacy_dmaring *ring)
}
/* Reset the RX DMA channel */
int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
u16 mmio_base, int dma64)
static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
u16 mmio_base,
enum b43legacy_dmatype type)
{
int i;
u32 value;
@ -489,13 +491,14 @@ int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
might_sleep();
offset = dma64 ? B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL;
offset = (type == B43legacy_DMA_64BIT) ?
B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL;
b43legacy_write32(dev, mmio_base + offset, 0);
for (i = 0; i < 10; i++) {
offset = dma64 ? B43legacy_DMA64_RXSTATUS :
B43legacy_DMA32_RXSTATUS;
offset = (type == B43legacy_DMA_64BIT) ?
B43legacy_DMA64_RXSTATUS : B43legacy_DMA32_RXSTATUS;
value = b43legacy_read32(dev, mmio_base + offset);
if (dma64) {
if (type == B43legacy_DMA_64BIT) {
value &= B43legacy_DMA64_RXSTAT;
if (value == B43legacy_DMA64_RXSTAT_DISABLED) {
i = -1;
@ -519,8 +522,9 @@ int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
}
/* Reset the RX DMA channel */
int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
u16 mmio_base, int dma64)
static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
u16 mmio_base,
enum b43legacy_dmatype type)
{
int i;
u32 value;
@ -529,10 +533,10 @@ int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
might_sleep();
for (i = 0; i < 10; i++) {
offset = dma64 ? B43legacy_DMA64_TXSTATUS :
B43legacy_DMA32_TXSTATUS;
offset = (type == B43legacy_DMA_64BIT) ?
B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
value = b43legacy_read32(dev, mmio_base + offset);
if (dma64) {
if (type == B43legacy_DMA_64BIT) {
value &= B43legacy_DMA64_TXSTAT;
if (value == B43legacy_DMA64_TXSTAT_DISABLED ||
value == B43legacy_DMA64_TXSTAT_IDLEWAIT ||
@ -547,13 +551,14 @@ int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
}
msleep(1);
}
offset = dma64 ? B43legacy_DMA64_TXCTL : B43legacy_DMA32_TXCTL;
offset = (type == B43legacy_DMA_64BIT) ? B43legacy_DMA64_TXCTL :
B43legacy_DMA32_TXCTL;
b43legacy_write32(dev, mmio_base + offset, 0);
for (i = 0; i < 10; i++) {
offset = dma64 ? B43legacy_DMA64_TXSTATUS :
B43legacy_DMA32_TXSTATUS;
offset = (type == B43legacy_DMA_64BIT) ?
B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
value = b43legacy_read32(dev, mmio_base + offset);
if (dma64) {
if (type == B43legacy_DMA_64BIT) {
value &= B43legacy_DMA64_TXSTAT;
if (value == B43legacy_DMA64_TXSTAT_DISABLED) {
i = -1;
@ -578,6 +583,32 @@ int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
return 0;
}
/* Check if a DMA mapping address is invalid. */
static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
dma_addr_t addr,
size_t buffersize)
{
if (unlikely(dma_mapping_error(addr)))
return 1;
switch (ring->type) {
case B43legacy_DMA_30BIT:
if ((u64)addr + buffersize > (1ULL << 30))
return 1;
break;
case B43legacy_DMA_32BIT:
if ((u64)addr + buffersize > (1ULL << 32))
return 1;
break;
case B43legacy_DMA_64BIT:
/* Currently we can't have addresses beyond 64 bits in the kernel. */
break;
}
/* The address is OK. */
return 0;
}
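
The (u64) casts in b43legacy_dma_mapping_error() above matter: with a 32-bit dma_addr_t, addr + buffersize could wrap and slip past the comparison. Illustrative numbers for the 30-bit engine:

/* addr       = 0x3FFFF000  (just under the 1 GiB boundary)
 * buffersize = 0x2000
 * (u64)addr + buffersize = 0x40001000 > (1ULL << 30) -> rejected,
 * whereas a 32-bit sum near the 4 GiB boundary could wrap to a small
 * value and wrongly pass the 32-bit engine's check. */
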
static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
struct b43legacy_dmadesc_generic *desc,
struct b43legacy_dmadesc_meta *meta,
@ -595,7 +626,7 @@ static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
return -ENOMEM;
dmaaddr = map_descbuffer(ring, skb->data,
ring->rx_buffersize, 0);
if (dma_mapping_error(dmaaddr)) {
if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
/* ugh. try to realloc in zone_dma */
gfp_flags |= GFP_DMA;
@ -608,7 +639,7 @@ static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
ring->rx_buffersize, 0);
}
if (dma_mapping_error(dmaaddr)) {
if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
dev_kfree_skb_any(skb);
return -EIO;
}
@ -674,7 +705,7 @@ static int dmacontroller_setup(struct b43legacy_dmaring *ring)
u32 trans = ssb_dma_translation(ring->dev->dev);
if (ring->tx) {
if (ring->dma64) {
if (ring->type == B43legacy_DMA_64BIT) {
u64 ringbase = (u64)(ring->dmabase);
addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@ -709,7 +740,7 @@ static int dmacontroller_setup(struct b43legacy_dmaring *ring)
err = alloc_initial_descbuffers(ring);
if (err)
goto out;
if (ring->dma64) {
if (ring->type == B43legacy_DMA_64BIT) {
u64 ringbase = (u64)(ring->dmabase);
addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@ -760,16 +791,16 @@ static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
{
if (ring->tx) {
b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
ring->dma64);
if (ring->dma64) {
ring->type);
if (ring->type == B43legacy_DMA_64BIT) {
b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, 0);
b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, 0);
} else
b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
} else {
b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
ring->dma64);
if (ring->dma64) {
ring->type);
if (ring->type == B43legacy_DMA_64BIT) {
b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 0);
b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, 0);
} else
@ -824,11 +855,10 @@ static u64 supported_dma_mask(struct b43legacy_wldev *dev)
/* Main initialization function. */
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(
struct b43legacy_wldev *dev,
int controller_index,
int for_tx,
int dma64)
struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
int controller_index,
int for_tx,
enum b43legacy_dmatype type)
{
struct b43legacy_dmaring *ring;
int err;
@ -838,6 +868,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
goto out;
ring->type = type;
nr_slots = B43legacy_RXRING_SLOTS;
if (for_tx)
@ -855,12 +886,12 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(
goto err_kfree_meta;
/* test for ability to dma to txhdr_cache */
dma_test = dma_map_single(dev->dev->dev,
ring->txhdr_cache,
sizeof(struct b43legacy_txhdr_fw3),
DMA_TO_DEVICE);
dma_test = dma_map_single(dev->dev->dev, ring->txhdr_cache,
sizeof(struct b43legacy_txhdr_fw3),
DMA_TO_DEVICE);
if (dma_mapping_error(dma_test)) {
if (b43legacy_dma_mapping_error(ring, dma_test,
sizeof(struct b43legacy_txhdr_fw3))) {
/* ugh realloc */
kfree(ring->txhdr_cache);
ring->txhdr_cache = kcalloc(nr_slots,
@ -874,7 +905,8 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(
sizeof(struct b43legacy_txhdr_fw3),
DMA_TO_DEVICE);
if (dma_mapping_error(dma_test))
if (b43legacy_dma_mapping_error(ring, dma_test,
sizeof(struct b43legacy_txhdr_fw3)))
goto err_kfree_txhdr_cache;
}
@ -885,11 +917,9 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(
ring->dev = dev;
ring->nr_slots = nr_slots;
ring->mmio_base = b43legacy_dmacontroller_base(dma64,
controller_index);
ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
ring->index = controller_index;
ring->dma64 = !!dma64;
if (dma64)
if (type == B43legacy_DMA_64BIT)
ring->ops = &dma64_ops;
else
ring->ops = &dma32_ops;
@ -939,10 +969,10 @@ static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
if (!ring)
return;
b43legacydbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots:"
" %d/%d\n", (ring->dma64) ? "64" : "32", ring->mmio_base,
(ring->tx) ? "TX" : "RX",
ring->max_used_slots, ring->nr_slots);
b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
" %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
(ring->tx) ? "TX" : "RX", ring->max_used_slots,
ring->nr_slots);
/* Device IRQs are disabled prior entering this function,
* so no need to take care of concurrency with rx handler stuff.
*/
@ -988,11 +1018,22 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev)
struct b43legacy_dmaring *ring;
int err;
u64 dmamask;
int dma64 = 0;
enum b43legacy_dmatype type;
dmamask = supported_dma_mask(dev);
if (dmamask == DMA_64BIT_MASK)
dma64 = 1;
switch (dmamask) {
default:
B43legacy_WARN_ON(1);
case DMA_30BIT_MASK:
type = B43legacy_DMA_30BIT;
break;
case DMA_32BIT_MASK:
type = B43legacy_DMA_32BIT;
break;
case DMA_64BIT_MASK:
type = B43legacy_DMA_64BIT;
break;
}
err = ssb_dma_set_mask(dev->dev, dmamask);
if (err) {
@ -1010,52 +1051,50 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev)
err = -ENOMEM;
/* setup TX DMA channels. */
ring = b43legacy_setup_dmaring(dev, 0, 1, dma64);
ring = b43legacy_setup_dmaring(dev, 0, 1, type);
if (!ring)
goto out;
dma->tx_ring0 = ring;
ring = b43legacy_setup_dmaring(dev, 1, 1, dma64);
ring = b43legacy_setup_dmaring(dev, 1, 1, type);
if (!ring)
goto err_destroy_tx0;
dma->tx_ring1 = ring;
ring = b43legacy_setup_dmaring(dev, 2, 1, dma64);
ring = b43legacy_setup_dmaring(dev, 2, 1, type);
if (!ring)
goto err_destroy_tx1;
dma->tx_ring2 = ring;
ring = b43legacy_setup_dmaring(dev, 3, 1, dma64);
ring = b43legacy_setup_dmaring(dev, 3, 1, type);
if (!ring)
goto err_destroy_tx2;
dma->tx_ring3 = ring;
ring = b43legacy_setup_dmaring(dev, 4, 1, dma64);
ring = b43legacy_setup_dmaring(dev, 4, 1, type);
if (!ring)
goto err_destroy_tx3;
dma->tx_ring4 = ring;
ring = b43legacy_setup_dmaring(dev, 5, 1, dma64);
ring = b43legacy_setup_dmaring(dev, 5, 1, type);
if (!ring)
goto err_destroy_tx4;
dma->tx_ring5 = ring;
/* setup RX DMA channels. */
ring = b43legacy_setup_dmaring(dev, 0, 0, dma64);
ring = b43legacy_setup_dmaring(dev, 0, 0, type);
if (!ring)
goto err_destroy_tx5;
dma->rx_ring0 = ring;
if (dev->dev->id.revision < 5) {
ring = b43legacy_setup_dmaring(dev, 3, 0, dma64);
ring = b43legacy_setup_dmaring(dev, 3, 0, type);
if (!ring)
goto err_destroy_rx0;
dma->rx_ring3 = ring;
}
b43legacydbg(dev->wl, "%d-bit DMA initialized\n",
(dmamask == DMA_64BIT_MASK) ? 64 :
(dmamask == DMA_32BIT_MASK) ? 32 : 30);
b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
err = 0;
out:
return err;
@ -1194,9 +1233,13 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
}
meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
sizeof(struct b43legacy_txhdr_fw3), 1);
if (dma_mapping_error(meta_hdr->dmaaddr))
sizeof(struct b43legacy_txhdr_fw3), 1);
if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
sizeof(struct b43legacy_txhdr_fw3))) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
return -EIO;
}
ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);
@ -1211,7 +1254,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
if (dma_mapping_error(meta->dmaaddr)) {
if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb) {
ring->current_slot = old_top_slot;
@ -1225,7 +1268,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
skb = bounce_skb;
meta->skb = skb;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
if (dma_mapping_error(meta->dmaaddr)) {
if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
err = -EIO;

View File

@ -218,6 +218,12 @@ struct b43legacy_dma_ops {
void (*set_current_rxslot)(struct b43legacy_dmaring *ring, int slot);
};
enum b43legacy_dmatype {
B43legacy_DMA_30BIT = 30,
B43legacy_DMA_32BIT = 32,
B43legacy_DMA_64BIT = 64,
};
struct b43legacy_dmaring {
/* Lowlevel DMA ops. */
const struct b43legacy_dma_ops *ops;
@ -250,8 +256,8 @@ struct b43legacy_dmaring {
int index;
/* Boolean. Is this a TX ring? */
bool tx;
/* Boolean. 64bit DMA if true, 32bit DMA otherwise. */
bool dma64;
/* The type of DMA engine used. */
enum b43legacy_dmatype type;
/* Boolean. Is this ring stopped at ieee80211 level? */
bool stopped;
/* Lock, only used for TX. */
@ -284,15 +290,6 @@ void b43legacy_dma_write(struct b43legacy_dmaring *ring,
int b43legacy_dma_init(struct b43legacy_wldev *dev);
void b43legacy_dma_free(struct b43legacy_wldev *dev);
int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
u16 dmacontroller_mmio_base,
int dma64);
int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
u16 dmacontroller_mmio_base,
int dma64);
u16 b43legacy_dmacontroller_base(int dma64bit, int dmacontroller_idx);
void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev);
void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev);
@ -320,20 +317,6 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
}
static inline
int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
u16 dmacontroller_mmio_base,
int dma64)
{
return 0;
}
static inline
int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
u16 dmacontroller_mmio_base,
int dma64)
{
return 0;
}
static inline
void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
struct ieee80211_tx_queue_stats *stats)
{

View File

@ -3,7 +3,7 @@
* Broadcom B43legacy wireless driver
*
* Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>
* Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it>
* Copyright (c) 2005-2008 Stefano Brivio <stefano.brivio@polimi.it>
* Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>
* Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
* Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
@ -60,6 +60,8 @@ MODULE_AUTHOR("Stefano Brivio");
MODULE_AUTHOR("Michael Buesch");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(B43legacy_SUPPORTED_FIRMWARE_ID);
#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO)
static int modparam_pio;
module_param_named(pio, modparam_pio, int, 0444);
@ -1640,10 +1642,11 @@ static int b43legacy_upload_microcode(struct b43legacy_wldev *dev)
err = -EOPNOTSUPP;
goto error;
}
b43legacydbg(dev->wl, "Loading firmware version 0x%X, patch level %u "
"(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n", fwrev, fwpatch,
(fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF,
(fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F);
b43legacyinfo(dev->wl, "Loading firmware version 0x%X, patch level %u "
"(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n", fwrev, fwpatch,
(fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF,
(fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F,
fwtime & 0x1F);
dev->fw.rev = fwrev;
dev->fw.patch = fwpatch;
@ -3806,6 +3809,32 @@ static struct ssb_driver b43legacy_ssb_driver = {
.resume = b43legacy_resume,
};
static void b43legacy_print_driverinfo(void)
{
const char *feat_pci = "", *feat_leds = "", *feat_rfkill = "",
*feat_pio = "", *feat_dma = "";
#ifdef CONFIG_B43LEGACY_PCI_AUTOSELECT
feat_pci = "P";
#endif
#ifdef CONFIG_B43LEGACY_LEDS
feat_leds = "L";
#endif
#ifdef CONFIG_B43LEGACY_RFKILL
feat_rfkill = "R";
#endif
#ifdef CONFIG_B43LEGACY_PIO
feat_pio = "I";
#endif
#ifdef CONFIG_B43LEGACY_DMA
feat_dma = "D";
#endif
printk(KERN_INFO "Broadcom 43xx driver loaded "
"[ Features: %s%s%s%s%s, Firmware-ID: "
B43legacy_SUPPORTED_FIRMWARE_ID " ]\n",
feat_pci, feat_leds, feat_rfkill, feat_pio, feat_dma);
}
static int __init b43legacy_init(void)
{
int err;
@ -3816,6 +3845,8 @@ static int __init b43legacy_init(void)
if (err)
goto err_dfs_exit;
b43legacy_print_driverinfo();
return err;
err_dfs_exit:

View File

@ -3365,7 +3365,6 @@ static void ipw_rx_queue_reset(struct ipw_priv *priv,
/* Set us so that we have processed and used all buffers, but have
* not restocked the Rx queue with fresh buffers */
rxq->read = rxq->write = 0;
rxq->processed = RX_QUEUE_SIZE - 1;
rxq->free_count = 0;
spin_unlock_irqrestore(&rxq->lock, flags);
}
@ -3607,7 +3606,22 @@ static int ipw_load(struct ipw_priv *priv)
* Driver allocates buffers of this size for Rx
*/
static inline int ipw_queue_space(const struct clx2_queue *q)
/**
* ipw_rx_queue_space - Return number of free slots available in queue.
*/
static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
{
int s = q->read - q->write;
if (s <= 0)
s += RX_QUEUE_SIZE;
/* keep some buffer to not confuse full and empty queue */
s -= 2;
if (s < 0)
s = 0;
return s;
}
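
Worked through with RX_QUEUE_SIZE = 32 (the value is assumed for the example):

/* read == write        : s = 0 -> s += 32 -> 32 - 2 = 30 slots free
 * read = 5, write = 3  : s = 2 ->            2 - 2 =  0 slots free
 * The "- 2" reserve keeps a completely full ring distinguishable from
 * an empty one, since both would otherwise present read == write. */
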
static inline int ipw_tx_queue_space(const struct clx2_queue *q)
{
int s = q->last_used - q->first_empty;
if (s <= 0)
@ -4947,7 +4961,7 @@ static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
priv->tx_packets++;
}
done:
if ((ipw_queue_space(q) > q->low_mark) &&
if ((ipw_tx_queue_space(q) > q->low_mark) &&
(qindex >= 0) &&
(priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
netif_wake_queue(priv->net_dev);
@ -4965,7 +4979,7 @@ static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
struct clx2_queue *q = &txq->q;
struct tfd_frame *tfd;
if (ipw_queue_space(q) < (sync ? 1 : 2)) {
if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
IPW_ERROR("No space for Tx\n");
return -EBUSY;
}
@ -5070,7 +5084,7 @@ static void ipw_rx_queue_restock(struct ipw_priv *priv)
spin_lock_irqsave(&rxq->lock, flags);
write = rxq->write;
while ((rxq->write != rxq->processed) && (rxq->free_count)) {
while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
element = rxq->rx_free.next;
rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
list_del(element);
@ -5187,7 +5201,6 @@ static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
/* Set us so that we have processed and used all buffers, but have
* not restocked the Rx queue with fresh buffers */
rxq->read = rxq->write = 0;
rxq->processed = RX_QUEUE_SIZE - 1;
rxq->free_count = 0;
return rxq;
@ -8223,13 +8236,17 @@ static void ipw_rx(struct ipw_priv *priv)
struct ieee80211_hdr_4addr *header;
u32 r, w, i;
u8 network_packet;
u8 fill_rx = 0;
DECLARE_MAC_BUF(mac);
DECLARE_MAC_BUF(mac2);
DECLARE_MAC_BUF(mac3);
r = ipw_read32(priv, IPW_RX_READ_INDEX);
w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
i = priv->rxq->read;
if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
fill_rx = 1;
while (i != r) {
rxb = priv->rxq->queue[i];
@ -8404,11 +8421,17 @@ static void ipw_rx(struct ipw_priv *priv)
list_add_tail(&rxb->list, &priv->rxq->rx_used);
i = (i + 1) % RX_QUEUE_SIZE;
/* If there are a lot of unused frames, restock the Rx queue
* so the ucode won't assert */
if (fill_rx) {
priv->rxq->read = i;
ipw_rx_queue_replenish(priv);
}
}
/* Backtrack one entry */
priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
priv->rxq->read = i;
ipw_rx_queue_restock(priv);
}
@ -10336,7 +10359,7 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
ipw_write32(priv, q->reg_w, q->first_empty);
if (ipw_queue_space(q) < q->high_mark)
if (ipw_tx_queue_space(q) < q->high_mark)
netif_stop_queue(priv->net_dev);
return NETDEV_TX_OK;
@ -10357,7 +10380,7 @@ static int ipw_net_is_queue_full(struct net_device *dev, int pri)
struct clx2_tx_queue *txq = &priv->txq[0];
#endif /* CONFIG_IPW2200_QOS */
if (ipw_queue_space(&txq->q) < txq->q.high_mark)
if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
return 1;
return 0;

View File

@ -687,6 +687,12 @@ static int iwl3945_enqueue_hcmd(struct iwl3945_priv *priv, struct iwl3945_host_c
BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
!(cmd->meta.flags & CMD_SIZE_HUGE));
if (iwl3945_is_rfkill(priv)) {
IWL_DEBUG_INFO("Not sending command - RF KILL");
return -EIO;
}
if (iwl3945_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
IWL_ERROR("No space for Tx\n");
return -ENOSPC;
@ -1580,7 +1586,7 @@ static inline int iwl3945_eeprom_acquire_semaphore(struct iwl3945_priv *priv)
*/
int iwl3945_eeprom_init(struct iwl3945_priv *priv)
{
__le16 *e = (__le16 *)&priv->eeprom;
u16 *e = (u16 *)&priv->eeprom;
u32 gp = iwl3945_read32(priv, CSR_EEPROM_GP);
u32 r;
int sz = sizeof(priv->eeprom);
@ -1623,7 +1629,7 @@ int iwl3945_eeprom_init(struct iwl3945_priv *priv)
IWL_ERROR("Time out reading EEPROM[%d]", addr);
return -ETIMEDOUT;
}
e[addr / 2] = cpu_to_le16(r >> 16);
e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
}
return 0;
@ -2806,7 +2812,8 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
#endif
/* drop all data frame if we are not associated */
if ((!iwl3945_is_associated(priv) || !priv->assoc_id) &&
if ((!iwl3945_is_associated(priv) ||
((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id)) &&
((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) {
IWL_DEBUG_DROP("Dropping - !iwl3945_is_associated\n");
goto drop_unlock;
@ -4281,7 +4288,7 @@ static void iwl3945_rx_handle(struct iwl3945_priv *priv)
int reclaim;
unsigned long flags;
u8 fill_rx = 0;
u32 count = 0;
u32 count = 8;
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
@ -6256,6 +6263,8 @@ static void __iwl3945_down(struct iwl3945_priv *priv)
STATUS_RF_KILL_HW |
test_bit(STATUS_RF_KILL_SW, &priv->status) <<
STATUS_RF_KILL_SW |
test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
STATUS_GEO_CONFIGURED |
test_bit(STATUS_IN_SUSPEND, &priv->status) <<
STATUS_IN_SUSPEND;
goto exit;
@ -6267,6 +6276,8 @@ static void __iwl3945_down(struct iwl3945_priv *priv)
STATUS_RF_KILL_HW |
test_bit(STATUS_RF_KILL_SW, &priv->status) <<
STATUS_RF_KILL_SW |
test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
STATUS_GEO_CONFIGURED |
test_bit(STATUS_IN_SUSPEND, &priv->status) <<
STATUS_IN_SUSPEND |
test_bit(STATUS_FW_ERROR, &priv->status) <<

View File

@ -692,6 +692,11 @@ static int iwl4965_enqueue_hcmd(struct iwl4965_priv *priv, struct iwl4965_host_c
BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
!(cmd->meta.flags & CMD_SIZE_HUGE));
if (iwl4965_is_rfkill(priv)) {
IWL_DEBUG_INFO("Not sending command - RF KILL");
return -EIO;
}
if (iwl4965_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
IWL_ERROR("No space for Tx\n");
return -ENOSPC;
@ -1654,7 +1659,7 @@ static inline void iwl4965_eeprom_release_semaphore(struct iwl4965_priv *priv)
*/
int iwl4965_eeprom_init(struct iwl4965_priv *priv)
{
__le16 *e = (__le16 *)&priv->eeprom;
u16 *e = (u16 *)&priv->eeprom;
u32 gp = iwl4965_read32(priv, CSR_EEPROM_GP);
u32 r;
int sz = sizeof(priv->eeprom);
@ -1698,7 +1703,7 @@ int iwl4965_eeprom_init(struct iwl4965_priv *priv)
rc = -ETIMEDOUT;
goto done;
}
e[addr / 2] = cpu_to_le16(r >> 16);
e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
}
rc = 0;
@ -2935,7 +2940,7 @@ static int iwl4965_tx_skb(struct iwl4965_priv *priv,
/* drop all data frame if we are not associated */
if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
(!iwl4965_is_associated(priv) ||
!priv->assoc_id ||
((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) ||
!priv->assoc_station_added)) {
IWL_DEBUG_DROP("Dropping - !iwl4965_is_associated\n");
goto drop_unlock;
@ -4664,7 +4669,7 @@ static void iwl4965_rx_handle(struct iwl4965_priv *priv)
int reclaim;
unsigned long flags;
u8 fill_rx = 0;
u32 count = 0;
u32 count = 8;
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
@ -6680,6 +6685,8 @@ static void __iwl4965_down(struct iwl4965_priv *priv)
STATUS_RF_KILL_HW |
test_bit(STATUS_RF_KILL_SW, &priv->status) <<
STATUS_RF_KILL_SW |
test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
STATUS_GEO_CONFIGURED |
test_bit(STATUS_IN_SUSPEND, &priv->status) <<
STATUS_IN_SUSPEND;
goto exit;
@ -6691,6 +6698,8 @@ static void __iwl4965_down(struct iwl4965_priv *priv)
STATUS_RF_KILL_HW |
test_bit(STATUS_RF_KILL_SW, &priv->status) <<
STATUS_RF_KILL_SW |
test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
STATUS_GEO_CONFIGURED |
test_bit(STATUS_IN_SUSPEND, &priv->status) <<
STATUS_IN_SUSPEND |
test_bit(STATUS_FW_ERROR, &priv->status) <<

View File

@ -2300,7 +2300,7 @@ static void rndis_update_wireless_stats(struct work_struct *work)
struct usbnet *usbdev = priv->usbdev;
struct iw_statistics iwstats;
__le32 rssi, tmp;
int len, ret, bitrate, j;
int len, ret, j;
unsigned long flags;
int update_jiffies = STATS_UPDATE_JIFFIES;
void *buf;
@ -2352,14 +2352,10 @@ static void rndis_update_wireless_stats(struct work_struct *work)
if (ret == 0)
iwstats.discard.misc += le32_to_cpu(tmp);
/* Workaround transfer stalls on poor quality links. */
len = sizeof(tmp);
ret = rndis_query_oid(usbdev, OID_GEN_LINK_SPEED, &tmp, &len);
if (ret == 0) {
bitrate = le32_to_cpu(tmp) * 100;
if (bitrate > 11000000)
goto end;
/* Workaround transfer stalls on poor quality links.
* TODO: find right way to fix these stalls (as stalls do not happen
* with ndiswrapper/windows driver). */
if (iwstats.qual.qual <= 25) {
/* Decrease stats worker interval to catch stalls.
* faster. Faster than 400-500ms causes packet loss,
* Slower doesn't catch stalls fast enough.

View File

@ -1839,11 +1839,11 @@ static struct usb_device_id rt2500usb_device_table[] = {
/* Hercules */
{ USB_DEVICE(0x06f8, 0xe000), USB_DEVICE_DATA(&rt2500usb_ops) },
/* Melco */
{ USB_DEVICE(0x0411, 0x005e), USB_DEVICE_DATA(&rt2500usb_ops) },
{ USB_DEVICE(0x0411, 0x0066), USB_DEVICE_DATA(&rt2500usb_ops) },
{ USB_DEVICE(0x0411, 0x0067), USB_DEVICE_DATA(&rt2500usb_ops) },
{ USB_DEVICE(0x0411, 0x008b), USB_DEVICE_DATA(&rt2500usb_ops) },
{ USB_DEVICE(0x0411, 0x0097), USB_DEVICE_DATA(&rt2500usb_ops) },
/* MSI */
{ USB_DEVICE(0x0db0, 0x6861), USB_DEVICE_DATA(&rt2500usb_ops) },
{ USB_DEVICE(0x0db0, 0x6865), USB_DEVICE_DATA(&rt2500usb_ops) },

View File

@ -85,7 +85,7 @@ union hacs_u
#define HASR_MMC_INTR 0x0002 /* Interrupt request from MMC */
#define HASR_MMC_BUSY 0x0004 /* MMC busy indication */
#define HASR_PSA_BUSY 0x0008 /* LAN parameter storage area busy */
};
} __attribute__ ((packed));
typedef struct ha_t ha_t;
struct ha_t
@ -292,7 +292,7 @@ struct mmw_t
#define MMW_EXT_ANT_INTERNAL 0x00 /* Internal antenna */
#define MMW_EXT_ANT_EXTERNAL 0x03 /* External antenna */
#define MMW_EXT_ANT_IQ_TEST 0x1C /* IQ test pattern (set to 0) */
};
} __attribute__ ((packed));
#define MMW_SIZE 37
@ -347,7 +347,7 @@ struct mmr_t
unsigned char mmr_unused4[1]; /* unused */
unsigned char mmr_fee_data_l; /* Read data from EEPROM (low) */
unsigned char mmr_fee_data_h; /* Read data from EEPROM (high) */
};
} __attribute__ ((packed));
#define MMR_SIZE 36
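
The __attribute__ ((packed)) additions above keep these structs byte-identical to the hardware layout (hence the MMW_SIZE/MMR_SIZE constants). A standalone sketch of what compiler padding would otherwise do, using a made-up register layout:

#include <stdio.h>

/* Hypothetical device layout: without packed, the compiler may pad
 * "data" to a 2-byte boundary and the struct no longer mirrors the
 * device's registers. */
struct regs_padded {
	unsigned char  cmd;	/* offset 0 */
	unsigned short data;	/* likely padded to offset 2 */
};

struct regs_packed {
	unsigned char  cmd;	/* offset 0 */
	unsigned short data;	/* guaranteed at offset 1 */
} __attribute__ ((packed));

int main(void)
{
	printf("padded=%zu packed=%zu\n",
	       sizeof(struct regs_padded), sizeof(struct regs_packed));
	/* typically prints: padded=4 packed=3 */
	return 0;
}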

View File

@ -129,8 +129,7 @@ extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
/*
* Display a 6 byte device address (MAC) in a readable format.
*/
extern char *print_mac(char *buf, const unsigned char *addr);
#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
extern __pure char *print_mac(char *buf, const unsigned char *addr);
#define MAC_BUF_SIZE 18
#define DECLARE_MAC_BUF(var) char var[MAC_BUF_SIZE] __maybe_unused
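
Since several commits in this merge convert drivers from MAC_FMT to print_mac(), a user-space re-implementation of the pattern may be useful; the formatter below mimics what the kernel's print_mac() produces (the address bytes are made up):

#include <stdio.h>

#define MAC_BUF_SIZE 18		/* 17 characters plus the NUL */
#define DECLARE_MAC_BUF(var) char var[MAC_BUF_SIZE]

/* Sketch of the kernel helper: format a 6-byte address into the
 * caller-supplied buffer and hand the buffer back for printing. */
static char *print_mac(char *buf, const unsigned char *addr)
{
	snprintf(buf, MAC_BUF_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x",
		 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return buf;
}

int main(void)
{
	const unsigned char addr[6] = { 0x00, 0x1b, 0x21, 0xab, 0xcd, 0xef };
	DECLARE_MAC_BUF(mac);

	printf("MAC %s\n", print_mac(mac, addr));
	return 0;
}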

View File

@ -232,6 +232,8 @@ typedef unsigned char *sk_buff_data_t;
* @mark: Generic packet mark
* @nfct: Associated connection, if any
* @ipvs_property: skbuff is owned by ipvs
* @peeked: this packet has been seen already, so stats have been
* done for it, don't do them again
* @nf_trace: netfilter packet trace flag
* @nfctinfo: Relationship of this skb to the connection
* @nfct_reasm: netfilter conntrack re-assembly pointer

View File

@ -180,6 +180,7 @@ struct sock_common {
* @sk_sndmsg_off: cached offset for sendmsg
* @sk_send_head: front of stuff to transmit
* @sk_security: used by security modules
* @sk_mark: generic packet mark
* @sk_write_pending: a write to stream socket waits to start
* @sk_state_change: callback to indicate change in the state of the sock
* @sk_data_ready: callback to indicate there is data to be processed

View File

@ -366,7 +366,8 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net_device_stats *stats = &dev->stats;
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
DECLARE_MAC_BUF(mac);
DECLARE_MAC_BUF(mac2);
/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
*
* NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
@ -404,11 +405,8 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
pr_debug("%s: about to send skb: %p to dev: %s\n",
__FUNCTION__, skb, skb->dev->name);
pr_debug(" " MAC_FMT " " MAC_FMT " %4hx %4hx %4hx\n",
veth->h_dest[0], veth->h_dest[1], veth->h_dest[2],
veth->h_dest[3], veth->h_dest[4], veth->h_dest[5],
veth->h_source[0], veth->h_source[1], veth->h_source[2],
veth->h_source[3], veth->h_source[4], veth->h_source[5],
pr_debug(" %s %s %4hx %4hx %4hx\n",
print_mac(mac, veth->h_dest), print_mac(mac2, veth->h_source),
veth->h_vlan_proto, veth->h_vlan_TCI,
veth->h_vlan_encapsulated_proto);

View File

@ -183,8 +183,7 @@ pack_sg_list(struct scatterlist *sg, int start, int limit, char *data,
sg_set_buf(&sg[index++], data, s);
count -= s;
data += s;
if (index > limit)
BUG();
BUG_ON(index > limit);
}
return index-start;

View File

@ -117,6 +117,12 @@ void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
unsigned char *p;
int frontlen, len, fragno, ka9qfrag, first = 1;
if (paclen < 16) {
WARN_ON_ONCE(1);
kfree_skb(skb);
return;
}
if ((skb->len - 1) > paclen) {
if (*skb->data == AX25_P_TEXT) {
skb_pull(skb, 1); /* skip PID */
@ -251,8 +257,6 @@ void ax25_kick(ax25_cb *ax25)
if (start == end)
return;
ax25->vs = start;
/*
* Transmit data until either we're out of data to send or
* the window is full. Send a poll on the final I frame if
@ -261,8 +265,13 @@ void ax25_kick(ax25_cb *ax25)
/*
* Dequeue the frame and copy it.
* Check for race with ax25_clear_queues().
*/
skb = skb_dequeue(&ax25->write_queue);
if (!skb)
return;
ax25->vs = start;
do {
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {

View File

@ -260,7 +260,6 @@ int hci_conn_del(struct hci_conn *conn)
tasklet_enable(&hdev->tx_task);
skb_queue_purge(&conn->data_q);
hci_conn_del_sysfs(conn);
hci_dev_put(hdev);
return 0;
}

View File

@ -24,6 +24,7 @@
/* Bluetooth HCI core. */
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>
@ -1321,7 +1322,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* ACL tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
hci_acl_tx_to(hdev);
}
@ -1543,7 +1544,7 @@ static void hci_cmd_task(unsigned long arg)
BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
BT_ERR("%s command tx timeout", hdev->name);
atomic_set(&hdev->cmd_cnt, 1);
}
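
For reference on the time_* conversion above: time_after() from <linux/jiffies.h> compares via signed subtraction, so it stays correct when jiffies wraps. Essentially:

#define time_after(a, b)	((long)((b) - (a)) < 0)

/* Near the wrap point of a 32-bit jiffies counter, for example
 * cmd_last_tx = 0xFFFFFF00 and jiffies = 0x00000150 after wrapping:
 *   time_after(jiffies, cmd_last_tx + HZ)
 * subtracts as signed and still reports the timeout correctly, where
 * a naive "jiffies > cmd_last_tx + HZ" comparison would not. */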

View File

@ -320,28 +320,34 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
queue_work(btaddconn, &conn->work);
}
/*
* The rfcomm tty device will possibly retain even when conn
* is down, and sysfs doesn't support move zombie device,
* so we should move the device before conn device is destroyed.
*/
static int __match_tty(struct device *dev, void *data)
{
/* The rfcomm tty device will possibly retain even when conn
* is down, and sysfs doesn't support move zombie device,
* so we should move the device before conn device is destroyed.
* Due to the only child device of hci_conn dev is rfcomm
* tty_dev, here just return 1
*/
return 1;
return !strncmp(dev->bus_id, "rfcomm", 6);
}
static void del_conn(struct work_struct *work)
{
struct device *dev;
struct hci_conn *conn = container_of(work, struct hci_conn, work);
struct hci_dev *hdev = conn->hdev;
while (dev = device_find_child(&conn->dev, NULL, __match_tty)) {
while (1) {
struct device *dev;
dev = device_find_child(&conn->dev, NULL, __match_tty);
if (!dev)
break;
device_move(dev, NULL);
put_device(dev);
}
device_del(&conn->dev);
put_device(&conn->dev);
hci_dev_put(hdev);
}
void hci_conn_del_sysfs(struct hci_conn *conn)

View File

@ -834,12 +834,18 @@ static void neigh_timer_handler(unsigned long arg)
}
if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
struct sk_buff *skb = skb_peek(&neigh->arp_queue);
/* keep skb alive even if arp_queue overflows */
if (skb)
skb_get(skb);
write_unlock(&neigh->lock);
neigh->ops->solicit(neigh, skb);
atomic_inc(&neigh->probes);
}
if (skb)
kfree_skb(skb);
} else {
out:
write_unlock(&neigh->lock);
write_unlock(&neigh->lock);
}
if (notify)
neigh_update_notify(neigh);

View File

@ -504,7 +504,7 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
static int set_operstate(struct net_device *dev, unsigned char transition, bool send_notification)
static void set_operstate(struct net_device *dev, unsigned char transition)
{
unsigned char operstate = dev->operstate;
@ -527,12 +527,8 @@ static int set_operstate(struct net_device *dev, unsigned char transition, bool
write_lock_bh(&dev_base_lock);
dev->operstate = operstate;
write_unlock_bh(&dev_base_lock);
if (send_notification)
netdev_state_change(dev);
return 1;
} else
return 0;
netdev_state_change(dev);
}
}
static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
@ -826,7 +822,6 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
if (tb[IFLA_BROADCAST]) {
nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
send_addr_notify = 1;
modified = 1;
}
if (ifm->ifi_flags || ifm->ifi_change) {
@ -839,23 +834,16 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
dev_change_flags(dev, flags);
}
if (tb[IFLA_TXQLEN]) {
if (dev->tx_queue_len != nla_get_u32(tb[IFLA_TXQLEN])) {
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
modified = 1;
}
}
if (tb[IFLA_TXQLEN])
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
if (tb[IFLA_OPERSTATE])
modified |= set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]), false);
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
if (tb[IFLA_LINKMODE]) {
if (dev->link_mode != nla_get_u8(tb[IFLA_LINKMODE])) {
write_lock_bh(&dev_base_lock);
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
write_unlock_bh(&dev_base_lock);
modified = 1;
}
write_lock_bh(&dev_base_lock);
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
write_unlock_bh(&dev_base_lock);
}
err = 0;
@ -869,10 +857,6 @@ errout:
if (send_addr_notify)
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
if (modified)
netdev_state_change(dev);
return err;
}
@ -990,7 +974,7 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname,
if (tb[IFLA_TXQLEN])
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
if (tb[IFLA_OPERSTATE])
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]), true);
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
if (tb[IFLA_LINKMODE])
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);

@ -368,6 +368,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
if (!(neigh->nud_state&NUD_VALID))
printk(KERN_DEBUG "trying to ucast probe in NUD_INVALID\n");
dst_ha = neigh->ha;
read_lock_bh(&neigh->lock);
} else if ((probes -= neigh->parms->app_probes) < 0) {
#ifdef CONFIG_ARPD
neigh_app_ns(neigh);
@ -377,6 +378,8 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
dst_ha, dev->dev_addr, NULL);
if (dst_ha)
read_unlock_bh(&neigh->lock);
}
static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
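
The hunk above widens the locking in arp_solicit(): neigh->ha is copied by arp_send(), so the read lock has to be held across the use of the address, not just its load, and the conditional unlock mirrors the conditional lock. A small pthread sketch of that conditional lock-pairing idiom (names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t neigh_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned char ha[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

static void send_probe(const unsigned char *dst)
{
	if (dst)
		printf("unicast probe to ...:%02x\n", dst[5]);
	else
		puts("broadcast probe");
}

static void solicit(int unicast)
{
	const unsigned char *dst_ha = NULL;

	if (unicast) {
		/* hold the lock across the *use* of ha below: its
		 * contents, not the pointer, are what must not change */
		pthread_rwlock_rdlock(&neigh_lock);
		dst_ha = ha;
	}
	send_probe(dst_ha);
	if (dst_ha)	/* unlock exactly on the path that locked */
		pthread_rwlock_unlock(&neigh_lock);
}

int main(void)
{
	solicit(1);
	solicit(0);
	return 0;
}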

@ -1390,7 +1390,7 @@ static int __init ip_auto_config(void)
* Clue in the operator.
*/
printk("IP-Config: Complete:");
printk("\n device=%s", ic_dev->name);
printk("\n device=%s", ic_dev->name);
printk(", addr=%u.%u.%u.%u", NIPQUAD(ic_myaddr));
printk(", mask=%u.%u.%u.%u", NIPQUAD(ic_netmask));
printk(", gw=%u.%u.%u.%u", NIPQUAD(ic_gateway));

@ -719,7 +719,7 @@ static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
}
/*
* Send a SYN-ACK after having received an ACK.
* Send a SYN-ACK after having received a SYN.
* This still operates on a request_sock only, not on a big
* socket.
*/

@ -752,14 +752,6 @@ static int __init inet6_init(void)
BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb));
#ifdef MODULE
#if 0 /* FIXME --RR */
if (!mod_member_present(&__this_module, can_unload))
return -EINVAL;
__this_module.can_unload = &ipv6_unload;
#endif
#endif
err = proto_register(&tcpv6_prot, 1);
if (err)
goto out;

@ -1151,7 +1151,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
fn = fn->parent;
}
/* No more references are possible at this point. */
if (atomic_read(&rt->rt6i_ref) != 1) BUG();
BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
}
inet6_rt_notify(RTM_DELROUTE, rt, info);
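
This merge folds several open-coded "if (cond) BUG();" sites into BUG_ON(cond) (here, and in two rxrpc hunks further down); behaviour is unchanged, the call just reads as the assertion it is. In rough userspace terms:

#include <assert.h>

/* userspace approximation; the kernel's BUG_ON() oopses instead of aborting */
#define BUG_ON(cond)	assert(!(cond))

int main(void)
{
	int refs = 1;

	BUG_ON(refs != 1);	/* passes; any other refcount would abort */
	return 0;
}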

@ -550,6 +550,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
ip_rt_put(rt);
goto out;
}
skb2->dst = (struct dst_entry *)rt;
} else {
ip_rt_put(rt);
if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,

@ -165,6 +165,7 @@ static int ieee80211_open(struct net_device *dev)
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct ieee80211_if_init_conf conf;
int res;
bool need_hw_reconfig = 0;
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@ -218,7 +219,7 @@ static int ieee80211_open(struct net_device *dev)
res = local->ops->start(local_to_hw(local));
if (res)
return res;
ieee80211_hw_config(local);
need_hw_reconfig = 1;
ieee80211_led_radio(local, local->hw.conf.radio_enabled);
}
@ -282,6 +283,8 @@ static int ieee80211_open(struct net_device *dev)
atomic_inc(&local->iff_promiscs);
local->open_count++;
if (need_hw_reconfig)
ieee80211_hw_config(local);
netif_start_queue(dev);

@ -718,36 +718,35 @@ static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info)
* NetLabel Generic NETLINK Command Definitions
*/
static struct genl_ops netlbl_cipsov4_genl_c_add = {
static struct genl_ops netlbl_cipsov4_ops[] = {
{
.cmd = NLBL_CIPSOV4_C_ADD,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_cipsov4_genl_policy,
.doit = netlbl_cipsov4_add,
.dumpit = NULL,
};
static struct genl_ops netlbl_cipsov4_genl_c_remove = {
},
{
.cmd = NLBL_CIPSOV4_C_REMOVE,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_cipsov4_genl_policy,
.doit = netlbl_cipsov4_remove,
.dumpit = NULL,
};
static struct genl_ops netlbl_cipsov4_genl_c_list = {
},
{
.cmd = NLBL_CIPSOV4_C_LIST,
.flags = 0,
.policy = netlbl_cipsov4_genl_policy,
.doit = netlbl_cipsov4_list,
.dumpit = NULL,
};
static struct genl_ops netlbl_cipsov4_genl_c_listall = {
},
{
.cmd = NLBL_CIPSOV4_C_LISTALL,
.flags = 0,
.policy = netlbl_cipsov4_genl_policy,
.doit = NULL,
.dumpit = netlbl_cipsov4_listall,
},
};
/*
@ -762,30 +761,20 @@ static struct genl_ops netlbl_cipsov4_genl_c_listall = {
* mechanism. Returns zero on success, negative values on failure.
*
*/
int netlbl_cipsov4_genl_init(void)
int __init netlbl_cipsov4_genl_init(void)
{
int ret_val;
int ret_val, i;
ret_val = genl_register_family(&netlbl_cipsov4_gnl_family);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_cipsov4_gnl_family,
&netlbl_cipsov4_genl_c_add);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_cipsov4_gnl_family,
&netlbl_cipsov4_genl_c_remove);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_cipsov4_gnl_family,
&netlbl_cipsov4_genl_c_list);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_cipsov4_gnl_family,
&netlbl_cipsov4_genl_c_listall);
if (ret_val != 0)
return ret_val;
for (i = 0; i < ARRAY_SIZE(netlbl_cipsov4_ops); i++) {
ret_val = genl_register_ops(&netlbl_cipsov4_gnl_family,
&netlbl_cipsov4_ops[i]);
if (ret_val != 0)
return ret_val;
}
return 0;
}
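
The rewrite above (and the matching ones in the two NetLabel files below) replaces one named genl_ops variable plus one registration call per command with a single array walked in a loop. The shape is plain C, nothing genetlink-specific; a self-contained sketch, with the ops layout and register function invented for illustration:

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

struct ops {
	int cmd;
	int (*doit)(void);
};

static int do_add(void)    { puts("add");    return 0; }
static int do_remove(void) { puts("remove"); return 0; }
static int do_list(void)   { puts("list");   return 0; }

/* one table instead of three separately named variables */
static struct ops my_ops[] = {
	{ .cmd = 1, .doit = do_add    },
	{ .cmd = 2, .doit = do_remove },
	{ .cmd = 3, .doit = do_list   },
};

/* stand-in for genl_register_ops() */
static int register_op(const struct ops *op)
{
	return op->doit();
}

int main(void)
{
	size_t i;
	int ret;

	/* one loop instead of N copy-pasted register-and-bail blocks */
	for (i = 0; i < ARRAY_SIZE(my_ops); i++) {
		ret = register_op(&my_ops[i]);
		if (ret != 0)
			return ret;
	}
	return 0;
}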

@ -171,7 +171,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain)
* values on error.
*
*/
int netlbl_domhsh_init(u32 size)
int __init netlbl_domhsh_init(u32 size)
{
u32 iter;
struct netlbl_domhsh_tbl *hsh_tbl;

@ -517,68 +517,63 @@ version_failure:
* NetLabel Generic NETLINK Command Definitions
*/
static struct genl_ops netlbl_mgmt_genl_c_add = {
static struct genl_ops netlbl_mgmt_genl_ops[] = {
{
.cmd = NLBL_MGMT_C_ADD,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_mgmt_genl_policy,
.doit = netlbl_mgmt_add,
.dumpit = NULL,
};
static struct genl_ops netlbl_mgmt_genl_c_remove = {
},
{
.cmd = NLBL_MGMT_C_REMOVE,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_mgmt_genl_policy,
.doit = netlbl_mgmt_remove,
.dumpit = NULL,
};
static struct genl_ops netlbl_mgmt_genl_c_listall = {
},
{
.cmd = NLBL_MGMT_C_LISTALL,
.flags = 0,
.policy = netlbl_mgmt_genl_policy,
.doit = NULL,
.dumpit = netlbl_mgmt_listall,
};
static struct genl_ops netlbl_mgmt_genl_c_adddef = {
},
{
.cmd = NLBL_MGMT_C_ADDDEF,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_mgmt_genl_policy,
.doit = netlbl_mgmt_adddef,
.dumpit = NULL,
};
static struct genl_ops netlbl_mgmt_genl_c_removedef = {
},
{
.cmd = NLBL_MGMT_C_REMOVEDEF,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_mgmt_genl_policy,
.doit = netlbl_mgmt_removedef,
.dumpit = NULL,
};
static struct genl_ops netlbl_mgmt_genl_c_listdef = {
},
{
.cmd = NLBL_MGMT_C_LISTDEF,
.flags = 0,
.policy = netlbl_mgmt_genl_policy,
.doit = netlbl_mgmt_listdef,
.dumpit = NULL,
};
static struct genl_ops netlbl_mgmt_genl_c_protocols = {
},
{
.cmd = NLBL_MGMT_C_PROTOCOLS,
.flags = 0,
.policy = netlbl_mgmt_genl_policy,
.doit = NULL,
.dumpit = netlbl_mgmt_protocols,
};
static struct genl_ops netlbl_mgmt_genl_c_version = {
},
{
.cmd = NLBL_MGMT_C_VERSION,
.flags = 0,
.policy = netlbl_mgmt_genl_policy,
.doit = netlbl_mgmt_version,
.dumpit = NULL,
},
};
/*
@ -593,46 +588,20 @@ static struct genl_ops netlbl_mgmt_genl_c_version = {
* mechanism. Returns zero on success, negative values on failure.
*
*/
int netlbl_mgmt_genl_init(void)
int __init netlbl_mgmt_genl_init(void)
{
int ret_val;
int ret_val, i;
ret_val = genl_register_family(&netlbl_mgmt_gnl_family);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
&netlbl_mgmt_genl_c_add);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
&netlbl_mgmt_genl_c_remove);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
&netlbl_mgmt_genl_c_listall);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
&netlbl_mgmt_genl_c_adddef);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
&netlbl_mgmt_genl_c_removedef);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
&netlbl_mgmt_genl_c_listdef);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
&netlbl_mgmt_genl_c_protocols);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
&netlbl_mgmt_genl_c_version);
if (ret_val != 0)
return ret_val;
for (i = 0; i < ARRAY_SIZE(netlbl_mgmt_genl_ops); i++) {
ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
&netlbl_mgmt_genl_ops[i]);
if (ret_val != 0)
return ret_val;
}
return 0;
}

@ -1553,68 +1553,63 @@ unlabel_staticlistdef_return:
* NetLabel Generic NETLINK Command Definitions
*/
static struct genl_ops netlbl_unlabel_genl_c_staticadd = {
static struct genl_ops netlbl_unlabel_genl_ops[] = {
{
.cmd = NLBL_UNLABEL_C_STATICADD,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_unlabel_genl_policy,
.doit = netlbl_unlabel_staticadd,
.dumpit = NULL,
};
static struct genl_ops netlbl_unlabel_genl_c_staticremove = {
},
{
.cmd = NLBL_UNLABEL_C_STATICREMOVE,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_unlabel_genl_policy,
.doit = netlbl_unlabel_staticremove,
.dumpit = NULL,
};
static struct genl_ops netlbl_unlabel_genl_c_staticlist = {
},
{
.cmd = NLBL_UNLABEL_C_STATICLIST,
.flags = 0,
.policy = netlbl_unlabel_genl_policy,
.doit = NULL,
.dumpit = netlbl_unlabel_staticlist,
};
static struct genl_ops netlbl_unlabel_genl_c_staticadddef = {
},
{
.cmd = NLBL_UNLABEL_C_STATICADDDEF,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_unlabel_genl_policy,
.doit = netlbl_unlabel_staticadddef,
.dumpit = NULL,
};
static struct genl_ops netlbl_unlabel_genl_c_staticremovedef = {
},
{
.cmd = NLBL_UNLABEL_C_STATICREMOVEDEF,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_unlabel_genl_policy,
.doit = netlbl_unlabel_staticremovedef,
.dumpit = NULL,
};
static struct genl_ops netlbl_unlabel_genl_c_staticlistdef = {
},
{
.cmd = NLBL_UNLABEL_C_STATICLISTDEF,
.flags = 0,
.policy = netlbl_unlabel_genl_policy,
.doit = NULL,
.dumpit = netlbl_unlabel_staticlistdef,
};
static struct genl_ops netlbl_unlabel_genl_c_accept = {
},
{
.cmd = NLBL_UNLABEL_C_ACCEPT,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_unlabel_genl_policy,
.doit = netlbl_unlabel_accept,
.dumpit = NULL,
};
static struct genl_ops netlbl_unlabel_genl_c_list = {
},
{
.cmd = NLBL_UNLABEL_C_LIST,
.flags = 0,
.policy = netlbl_unlabel_genl_policy,
.doit = netlbl_unlabel_list,
.dumpit = NULL,
},
};
/*
@ -1629,53 +1624,20 @@ static struct genl_ops netlbl_unlabel_genl_c_list = {
* mechanism. Returns zero on success, negative values on failure.
*
*/
int netlbl_unlabel_genl_init(void)
int __init netlbl_unlabel_genl_init(void)
{
int ret_val;
int ret_val, i;
ret_val = genl_register_family(&netlbl_unlabel_gnl_family);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
&netlbl_unlabel_genl_c_staticadd);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
&netlbl_unlabel_genl_c_staticremove);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
&netlbl_unlabel_genl_c_staticlist);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
&netlbl_unlabel_genl_c_staticadddef);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
&netlbl_unlabel_genl_c_staticremovedef);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
&netlbl_unlabel_genl_c_staticlistdef);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
&netlbl_unlabel_genl_c_accept);
if (ret_val != 0)
return ret_val;
ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
&netlbl_unlabel_genl_c_list);
if (ret_val != 0)
return ret_val;
for (i = 0; i < ARRAY_SIZE(netlbl_unlabel_genl_ops); i++) {
ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
&netlbl_unlabel_genl_ops[i]);
if (ret_val != 0)
return ret_val;
}
return 0;
}
@ -1699,7 +1661,7 @@ static struct notifier_block netlbl_unlhsh_netdev_notifier = {
* non-zero values on error.
*
*/
int netlbl_unlabel_init(u32 size)
int __init netlbl_unlabel_init(u32 size)
{
u32 iter;
struct netlbl_unlhsh_tbl *hsh_tbl;
@ -1803,7 +1765,7 @@ unlabel_getattr_nolabel:
* and to send unlabeled network traffic by default.
*
*/
int netlbl_unlabel_defconf(void)
int __init netlbl_unlabel_defconf(void)
{
int ret_val;
struct netlbl_dom_map *entry;

@ -59,7 +59,7 @@
* non-zero on failure.
*
*/
int netlbl_netlink_init(void)
int __init netlbl_netlink_init(void)
{
int ret_val;

@ -156,8 +156,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
false);
spin_unlock(&call->lock);
notification = NULL;
if (ret < 0)
BUG();
BUG_ON(ret < 0);
}
spin_unlock(&call->conn->state_lock);

@ -814,8 +814,7 @@ static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
spin_lock_bh(&call->lock);
ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
spin_unlock_bh(&call->lock);
if (ret < 0)
BUG();
BUG_ON(ret < 0);
}
return 0;

@ -331,15 +331,31 @@ static void xfrm_dst_hash_transfer(struct hlist_head *list,
struct hlist_head *ndsttable,
unsigned int nhashmask)
{
struct hlist_node *entry, *tmp;
struct hlist_node *entry, *tmp, *entry0 = NULL;
struct xfrm_policy *pol;
unsigned int h0 = 0;
redo:
hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
unsigned int h;
h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
pol->family, nhashmask);
hlist_add_head(&pol->bydst, ndsttable+h);
if (!entry0) {
hlist_del(entry);
hlist_add_head(&pol->bydst, ndsttable+h);
h0 = h;
} else {
if (h != h0)
continue;
hlist_del(entry);
hlist_add_after(entry0, &pol->bydst);
}
entry0 = entry;
}
if (!hlist_empty(list)) {
entry0 = NULL;
goto redo;
}
}
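
The point of the redo/entry0 dance above is ordering: policies that collide in a bucket are kept in precedence order, and the old hlist_add_head() per entry reversed the relative order of colliding entries during a rehash. The kernel preserves order in place with hlist_add_after(); a userspace sketch of the same invariant, simplified to tail-appends into the new table (names invented):

#include <stdio.h>

struct pol {
	int prio;		/* chains must stay sorted by insertion order */
	struct pol *next;
};

#define NBUCKETS 4

/* order-preserving transfer: append at the tail of each new chain;
 * head-insertion would reverse every run of colliding entries */
static void transfer(struct pol *list, struct pol *table[], struct pol *tails[])
{
	while (list) {
		struct pol *p = list;
		unsigned int h = (unsigned int)p->prio % NBUCKETS;

		list = p->next;
		p->next = NULL;
		if (tails[h])
			tails[h]->next = p;
		else
			table[h] = p;
		tails[h] = p;
	}
}

int main(void)
{
	/* three policies with prio 1, 5, 9: all collide in bucket 1 */
	struct pol c = { 9, NULL }, b = { 5, &c }, a = { 1, &b };
	struct pol *table[NBUCKETS] = { NULL };
	struct pol *tails[NBUCKETS] = { NULL };
	struct pol *p;

	transfer(&a, table, tails);

	for (p = table[1]; p; p = p->next)
		printf("prio %d\n", p->prio);	/* 1, 5, 9: order kept */
	return 0;
}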