Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking updates from David Miller:

 1) Alexey Kuznetsov noticed we routed TCP resets improperly in the
    asymmetric routing case; fix this by reverting a change that made us
    use the incoming interface in the outgoing route key when we didn't
    have a socket context to work with.

 2) Fix TCP sysctl leakage of kernel memory to userspace, from Alan Cox.

 3) More UAPI header moves from David Howells, WIMAX and CAN this time.

 4) Fix TX stalls in e1000e wrt.  Byte Queue Limits, from Hiroaki
    SHIMODA, Denys Fedoryshchenko, and Jesse Brandeburg.

 5) Fix IPV6 crashes in packet generator module, from Amerigo Wang.

 6) Tidies and fixes in the new VXLAN driver from Stephen Hemminger.

 7) The bridge IP options parser doesn't first check that the SKB has at
    least an IP header's worth of data present.  Fix from Sarveshwar
    Bandi.

 8) The kernel now generates compound pages on transmit and the Xen
    netback driver needs some adjustments in order to handle this.  Fix
    from Ian Campbell.

 9) Turn off ASPM in JME driver, from Kevin Bardon and Matthew Garrett.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (43 commits)
  mcs7830: Fix link state detection
  net: add doc for in4_pton()
  net: add doc for in6_pton()
  vti: fix sparse bit endian warnings
  tcp: resets are misrouted
  usbnet: Support devices reporting idleness
  Add CDC-ACM support for the CX93010-2x UCMxx USB Modem
  net/ethernet/jme: disable ASPM
  tcp: sysctl interface leaks 16 bytes of kernel memory
  kaweth: print correct debug ptr
  e1000e: Change wthresh to 1 to avoid possible Tx stalls
  ipv4: fix route mark sparse warning
  xen: netback: handle compound page fragments on transmit.
  bridge: Pull ip header into skb->data before looking into ip header.
  isdn: fix a wrapping bug in isdn_ppp_ioctl()
  vxlan: fix oops when give unknown ifindex
  vxlan: fix receive checksum handling
  vxlan: add additional headroom
  vxlan: allow configuring port range
  vxlan: associate with tunnel socket on transmit
  ...
Linus Torvalds 2012-10-13 10:51:48 +09:00
commit 98260daa18
51 changed files with 450 additions and 296 deletions

View File

@ -595,7 +595,7 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
j = ipc->num / (sizeof(long) * 8);
i = ipc->num % (sizeof(long) * 8);
if (j < 8)
protos[j] |= (0x1 << i);
protos[j] |= (1UL << i);
ipc = ipc->next;
}
if ((r = set_arg(argp, protos, 8 * sizeof(long))))

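The one-line change above is the whole fix: protos[] is an array of long, so on 64-bit kernels the bit index i can reach 63, but 0x1 is a 32-bit int and 0x1 << i is undefined (and in practice wraps) once i >= 32. A standalone userspace sketch of the difference, with hypothetical values, not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned long protos[8] = { 0 };
	unsigned int num = 40;				/* protocol bit to set */
	unsigned int j = num / (sizeof(long) * 8);	/* word index: 0 on LP64 */
	unsigned int i = num % (sizeof(long) * 8);	/* bit index: 40 on LP64 */

	/* Wrong: 0x1 is an int, so a shift count of 40 exceeds its width.
	 * The behaviour is undefined; common compilers wrap it to bit 8.
	 */
	/* protos[j] |= (0x1 << i); */

	/* Right: promote to the array element's width before shifting. */
	protos[j] |= (1UL << i);

	printf("word %u = %#lx\n", j, protos[j]);	/* 0x10000000000 on LP64 */
	return 0;
}
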
View File

@ -175,13 +175,13 @@ struct e1000_info;
/*
* in the case of WTHRESH, it appears at least the 82571/2 hardware
* writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
* WTHRESH=4, and since we want 64 bytes at a time written back, set
* it to 5
* WTHRESH=4, so a setting of 5 gives the most efficient bus
* utilization but to avoid possible Tx stalls, set it to 1
*/
#define E1000_TXDCTL_DMA_BURST_ENABLE \
(E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
E1000_TXDCTL_COUNT_DESC | \
(5 << 16) | /* wthresh must be +1 more than desired */\
(1 << 16) | /* wthresh must be +1 more than desired */\
(1 << 8) | /* hthresh */ \
0x1f) /* pthresh */
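
For reference, the burst-enable value above packs three thresholds into one TXDCTL register value: pthresh in the low bits, hthresh at bit 8 and wthresh at bit 16. A standalone sketch that decodes such a value, using placeholder flag bits and assuming six-bit fields purely for illustration (not the driver's definitions):

#include <stdio.h>
#include <stdint.h>

/* Placeholder flag bits purely for illustration; the real e1000e
 * definitions live in the driver headers.
 */
#define DEMO_TXDCTL_GRAN	(1u << 24)
#define DEMO_TXDCTL_COUNT_DESC	(1u << 22)

#define DEMO_TXDCTL_BURST	(DEMO_TXDCTL_GRAN | DEMO_TXDCTL_COUNT_DESC | \
				 (1 << 16) |	/* wthresh */ \
				 (1 << 8)  |	/* hthresh */ \
				 0x1f)		/* pthresh */

int main(void)
{
	uint32_t v = DEMO_TXDCTL_BURST;

	printf("pthresh=%u hthresh=%u wthresh=%u\n",
	       v & 0x3f, (v >> 8) & 0x3f, (v >> 16) & 0x3f);
	/* With the patch above wthresh decodes to 1, i.e. descriptor
	 * write-back coalescing is effectively turned off to avoid the
	 * Tx stalls seen with Byte Queue Limits.
	 */
	return 0;
}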

View File

@ -2831,7 +2831,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
* set up some performance related parameters to encourage the
* hardware to use the bus more efficiently in bursts, depends
* on the tx_int_delay to be enabled,
* wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
* wthresh = 1 ==> burst write is disabled to avoid Tx stalls
* hthresh = 1 ==> prefetch when one or more available
* pthresh = 0x1f ==> prefetch if internal cache 31 or less
* BEWARE: this seems to work but should be considered first if

View File

@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@ -2973,6 +2974,9 @@ jme_init_one(struct pci_dev *pdev,
/*
* set up PCI device basics
*/
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
PCIE_LINK_STATE_CLKPM);
rc = pci_enable_device(pdev);
if (rc) {
pr_err("Cannot enable PCI device\n");

View File

@ -244,8 +244,12 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
* - suspend: peripheral ready to suspend
* - response: suggest N millisec polling
* - response complete: suggest N sec polling
*
* Suspend is reported and maybe heeded.
*/
case 2: /* Suspend hint */
usbnet_device_suggests_idle(dev);
continue;
case 3: /* Response hint */
case 4: /* Response complete hint */
continue;

View File

@ -424,7 +424,7 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
netdev_dbg(kaweth->net,
"Downloading firmware at %p to kaweth device at %p\n",
fw->data, kaweth);
kaweth->firmware_buf, kaweth);
netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len);
return kaweth_control(kaweth,

View File

@ -117,6 +117,7 @@ enum {
struct mcs7830_data {
u8 multi_filter[8];
u8 config;
u8 link_counter;
};
static const char driver_name[] = "MOSCHIP usb-ethernet driver";
@ -632,20 +633,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
static void mcs7830_status(struct usbnet *dev, struct urb *urb)
{
u8 *buf = urb->transfer_buffer;
bool link;
bool link, link_changed;
struct mcs7830_data *data = mcs7830_get_data(dev);
if (urb->actual_length < 16)
return;
link = !(buf[1] & 0x20);
if (netif_carrier_ok(dev->net) != link) {
if (link) {
netif_carrier_on(dev->net);
usbnet_defer_kevent(dev, EVENT_LINK_RESET);
} else
netif_carrier_off(dev->net);
netdev_dbg(dev->net, "Link Status is: %d\n", link);
}
link_changed = netif_carrier_ok(dev->net) != link;
if (link_changed) {
data->link_counter++;
/*
track link state 20 times to guard against erroneous
link state changes reported sometimes by the chip
*/
if (data->link_counter > 20) {
data->link_counter = 0;
if (link) {
netif_carrier_on(dev->net);
usbnet_defer_kevent(dev, EVENT_LINK_RESET);
} else
netif_carrier_off(dev->net);
netdev_dbg(dev->net, "Link Status is: %d\n", link);
}
} else
data->link_counter = 0;
}
static const struct driver_info moschip_info = {

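The mcs7830_status() change above debounces the chip's link indication: carrier state is only toggled after the status URB has reported a changed link state for more than 20 consecutive polls, and any agreeing poll resets the count. A self-contained sketch of that counting idea, with hypothetical names rather than the driver's:

#include <stdbool.h>
#include <stdio.h>

#define DEBOUNCE_POLLS 20	/* consistent samples required before acting */

struct demo_link {
	bool carrier;		/* state we have reported */
	unsigned int counter;	/* consecutive polls disagreeing with it */
};

/* Feed one raw link sample; return true when the reported state flips. */
static bool demo_link_poll(struct demo_link *dl, bool raw_link)
{
	if (raw_link == dl->carrier) {
		dl->counter = 0;	/* agreement resets the counter */
		return false;
	}
	if (++dl->counter <= DEBOUNCE_POLLS)
		return false;		/* not yet convinced */

	dl->counter = 0;
	dl->carrier = raw_link;		/* accept the new state */
	return true;
}

int main(void)
{
	struct demo_link dl = { .carrier = false, .counter = 0 };
	int flips = 0;

	/* 25 consecutive "link up" samples: the state flips exactly once,
	 * and only after the debounce threshold has been exceeded.
	 */
	for (int i = 0; i < 25; i++)
		flips += demo_link_poll(&dl, true);
	printf("carrier=%d flips=%d\n", dl.carrier, flips);	/* carrier=1 flips=1 */
	return 0;
}
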
View File

@ -1588,10 +1588,27 @@ int usbnet_resume (struct usb_interface *intf)
tasklet_schedule (&dev->bh);
}
}
if (test_and_clear_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags))
usb_autopm_get_interface_no_resume(intf);
return 0;
}
EXPORT_SYMBOL_GPL(usbnet_resume);
/*
* Either a subdriver implements manage_power, then it is assumed to always
* be ready to be suspended or it reports the readiness to be suspended
* explicitly
*/
void usbnet_device_suggests_idle(struct usbnet *dev)
{
if (!test_and_set_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) {
dev->intf->needs_remote_wakeup = 1;
usb_autopm_put_interface_async(dev->intf);
}
}
EXPORT_SYMBOL(usbnet_device_suggests_idle);
/*-------------------------------------------------------------------------*/
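
The new helper is a one-shot handshake: only the first idleness report sets EVENT_DEVICE_REPORT_IDLE, enables remote wakeup and drops one runtime-PM usage reference so the core may autosuspend, and usbnet_resume() clears the flag and takes the reference back. A minimal userspace sketch of that flag-plus-refcount pattern, with hypothetical names standing in for the USB autopm calls:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_dev {
	atomic_bool reported_idle;	/* EVENT_DEVICE_REPORT_IDLE stand-in */
	atomic_int pm_usage;		/* autopm usage count stand-in */
};

/* Subdriver side: may be called many times, only the first call counts. */
static void demo_suggest_idle(struct demo_dev *d)
{
	if (!atomic_exchange(&d->reported_idle, true))
		atomic_fetch_sub(&d->pm_usage, 1);	/* allow autosuspend */
}

/* Core side on resume: re-take the reference if one was given up. */
static void demo_resume(struct demo_dev *d)
{
	if (atomic_exchange(&d->reported_idle, false))
		atomic_fetch_add(&d->pm_usage, 1);
}

int main(void)
{
	struct demo_dev d = { .pm_usage = 1 };

	demo_suggest_idle(&d);
	demo_suggest_idle(&d);		/* second report is a no-op */
	printf("usage after idle hints: %d\n", atomic_load(&d.pm_usage)); /* 0 */
	demo_resume(&d);
	printf("usage after resume: %d\n", atomic_load(&d.pm_usage));	  /* 1 */
	return 0;
}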

View File

@ -106,6 +106,8 @@ struct vxlan_dev {
__be32 gaddr; /* multicast group */
__be32 saddr; /* source address */
unsigned int link; /* link to multicast over */
__u16 port_min; /* source port range */
__u16 port_max;
__u8 tos; /* TOS override */
__u8 ttl;
bool learn;
@ -228,9 +230,9 @@ static u32 eth_hash(const unsigned char *addr)
/* only want 6 bytes */
#ifdef __BIG_ENDIAN
value <<= 16;
#else
value >>= 16;
#else
value <<= 16;
#endif
return hash_64(value, FDB_HASH_BITS);
}
@ -535,7 +537,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
}
__skb_pull(skb, sizeof(struct vxlanhdr));
skb_postpull_rcsum(skb, eth_hdr(skb), sizeof(struct vxlanhdr));
/* Is this VNI defined? */
vni = ntohl(vxh->vx_vni) >> 8;
@ -554,7 +555,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
/* Re-examine inner Ethernet packet */
oip = ip_hdr(skb);
skb->protocol = eth_type_trans(skb, vxlan->dev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
/* Ignore packet loops (and multicast echo) */
if (compare_ether_addr(eth_hdr(skb)->h_source,
@ -566,6 +566,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
__skb_tunnel_rx(skb, vxlan->dev);
skb_reset_network_header(skb);
skb->ip_summed = CHECKSUM_NONE;
err = IP_ECN_decapsulate(oip, skb);
if (unlikely(err)) {
@ -621,46 +622,89 @@ static inline u8 vxlan_ecn_encap(u8 tos,
return INET_ECN_encapsulate(tos, inner);
}
static __be32 vxlan_find_dst(struct vxlan_dev *vxlan, struct sk_buff *skb)
{
const struct ethhdr *eth = (struct ethhdr *) skb->data;
const struct vxlan_fdb *f;
if (is_multicast_ether_addr(eth->h_dest))
return vxlan->gaddr;
f = vxlan_find_mac(vxlan, eth->h_dest);
if (f)
return f->remote_ip;
else
return vxlan->gaddr;
}
static void vxlan_sock_free(struct sk_buff *skb)
{
sock_put(skb->sk);
}
/* On transmit, associate with the tunnel socket */
static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
{
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct sock *sk = vn->sock->sk;
skb_orphan(skb);
sock_hold(sk);
skb->sk = sk;
skb->destructor = vxlan_sock_free;
}
/* Compute source port for outgoing packet
* first choice to use L4 flow hash since it will spread
* better and maybe available from hardware
* secondary choice is to use jhash on the Ethernet header
*/
static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
{
unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
u32 hash;
hash = skb_get_rxhash(skb);
if (!hash)
hash = jhash(skb->data, 2 * ETH_ALEN,
(__force u32) skb->protocol);
return (((u64) hash * range) >> 32) + vxlan->port_min;
}
/* Transmit local packets over Vxlan
*
* Outer IP header inherits ECN and DF from inner header.
* Outer UDP destination is the VXLAN assigned port.
* source port is based on hash of flow if available
* otherwise use a random value
* source port is based on hash of flow
*/
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct rtable *rt;
const struct ethhdr *eth;
const struct iphdr *old_iph;
struct iphdr *iph;
struct vxlanhdr *vxh;
struct udphdr *uh;
struct flowi4 fl4;
struct vxlan_fdb *f;
unsigned int pkt_len = skb->len;
u32 hash;
__be32 dst;
__u16 src_port;
__be16 df = 0;
__u8 tos, ttl;
int err;
dst = vxlan_find_dst(vxlan, skb);
if (!dst)
goto drop;
/* Need space for new headers (invalidates iph ptr) */
if (skb_cow_head(skb, VXLAN_HEADROOM))
goto drop;
eth = (void *)skb->data;
old_iph = ip_hdr(skb);
if (!is_multicast_ether_addr(eth->h_dest) &&
(f = vxlan_find_mac(vxlan, eth->h_dest)))
dst = f->remote_ip;
else if (vxlan->gaddr) {
dst = vxlan->gaddr;
} else
goto drop;
ttl = vxlan->ttl;
if (!ttl && IN_MULTICAST(ntohl(dst)))
ttl = 1;
@ -669,11 +713,15 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
if (tos == 1)
tos = vxlan_get_dsfield(old_iph, skb);
hash = skb_get_rxhash(skb);
src_port = vxlan_src_port(vxlan, skb);
rt = ip_route_output_gre(dev_net(dev), &fl4, dst,
vxlan->saddr, vxlan->vni,
RT_TOS(tos), vxlan->link);
memset(&fl4, 0, sizeof(fl4));
fl4.flowi4_oif = vxlan->link;
fl4.flowi4_tos = RT_TOS(tos);
fl4.daddr = dst;
fl4.saddr = vxlan->saddr;
rt = ip_route_output_key(dev_net(dev), &fl4);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to %pI4\n", &dst);
dev->stats.tx_carrier_errors++;
@ -702,7 +750,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
uh = udp_hdr(skb);
uh->dest = htons(vxlan_port);
uh->source = hash ? :random32();
uh->source = htons(src_port);
uh->len = htons(skb->len);
uh->check = 0;
@ -715,10 +763,12 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
iph->frag_off = df;
iph->protocol = IPPROTO_UDP;
iph->tos = vxlan_ecn_encap(tos, old_iph, skb);
iph->daddr = fl4.daddr;
iph->daddr = dst;
iph->saddr = fl4.saddr;
iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
vxlan_set_owner(dev, skb);
/* See __IPTUNNEL_XMIT */
skb->ip_summed = CHECKSUM_NONE;
ip_select_ident(iph, &rt->dst, NULL);
@ -928,9 +978,11 @@ static void vxlan_setup(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
unsigned h;
int low, high;
eth_hw_addr_random(dev);
ether_setup(dev);
dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
dev->netdev_ops = &vxlan_netdev_ops;
dev->destructor = vxlan_free;
@ -947,6 +999,10 @@ static void vxlan_setup(struct net_device *dev)
vxlan->age_timer.function = vxlan_cleanup;
vxlan->age_timer.data = (unsigned long) vxlan;
inet_get_local_port_range(&low, &high);
vxlan->port_min = low;
vxlan->port_max = high;
vxlan->dev = dev;
for (h = 0; h < FDB_HASH_SIZE; ++h)
@ -963,6 +1019,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
[IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
[IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
@ -995,6 +1052,18 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
return -EADDRNOTAVAIL;
}
}
if (data[IFLA_VXLAN_PORT_RANGE]) {
const struct ifla_vxlan_port_range *p
= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
if (ntohs(p->high) < ntohs(p->low)) {
pr_debug("port range %u .. %u not valid\n",
ntohs(p->low), ntohs(p->high));
return -EINVAL;
}
}
return 0;
}
@ -1021,14 +1090,18 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (data[IFLA_VXLAN_LOCAL])
vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
if (data[IFLA_VXLAN_LINK]) {
vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]);
if (data[IFLA_VXLAN_LINK] &&
(vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
struct net_device *lowerdev
= __dev_get_by_index(net, vxlan->link);
if (!tb[IFLA_MTU]) {
struct net_device *lowerdev;
lowerdev = __dev_get_by_index(net, vxlan->link);
dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
if (!lowerdev) {
pr_info("ifindex %d does not exist\n", vxlan->link);
return -ENODEV;
}
if (!tb[IFLA_MTU])
dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
}
if (data[IFLA_VXLAN_TOS])
@ -1045,6 +1118,13 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (data[IFLA_VXLAN_LIMIT])
vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
if (data[IFLA_VXLAN_PORT_RANGE]) {
const struct ifla_vxlan_port_range *p
= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
vxlan->port_min = ntohs(p->low);
vxlan->port_max = ntohs(p->high);
}
err = register_netdevice(dev);
if (!err)
hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));
@ -1073,12 +1153,17 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
0;
}
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
const struct vxlan_dev *vxlan = netdev_priv(dev);
struct ifla_vxlan_port_range ports = {
.low = htons(vxlan->port_min),
.high = htons(vxlan->port_max),
};
if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
goto nla_put_failure;
@ -1099,6 +1184,9 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
goto nla_put_failure;
if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
goto nla_put_failure;
return 0;
nla_put_failure:

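Among the VXLAN changes above, vxlan_src_port() maps a 32-bit flow hash uniformly onto the configured [port_min, port_max] source-port range with ((u64)hash * range) >> 32, avoiding a division. A standalone sketch of that scaling trick; the 32768..61000 range below is just the usual local-port default, used here for illustration:

#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit hash onto [lo, hi] without dividing: multiply by the
 * range size and keep the high 32 bits of the 64-bit product.
 */
static uint16_t demo_src_port(uint32_t hash, uint16_t lo, uint16_t hi)
{
	uint32_t range = (uint32_t)(hi - lo) + 1;

	return (uint16_t)((((uint64_t)hash * range) >> 32) + lo);
}

int main(void)
{
	printf("%u\n", demo_src_port(0x00000000u, 32768, 61000));	/* 32768 */
	printf("%u\n", demo_src_port(0xffffffffu, 32768, 61000));	/* 61000 */
	printf("%u\n", demo_src_port(0x12345678u, 32768, 61000));	/* somewhere in between */
	return 0;
}
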
View File

@ -1804,7 +1804,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
int ret;
struct ath5k_hw *ah = hw->priv;
struct ath5k_vif *avf = (void *)vif->drv_priv;
struct ath5k_vif *avf;
struct sk_buff *skb;
if (WARN_ON(!vif)) {
@ -1819,6 +1819,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
goto out;
}
avf = (void *)vif->drv_priv;
ath5k_txbuf_free_skb(ah, avf->bbuf);
avf->bbuf->skb = skb;
ret = ath5k_beacon_setup(ah, avf->bbuf);

View File

@ -120,7 +120,7 @@ static void ath9k_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
if (ath_tx_start(hw, skb, &txctl) != 0) {
ath_dbg(common, XMIT, "CABQ TX failed\n");
dev_kfree_skb_any(skb);
ieee80211_free_txskb(hw, skb);
}
}

View File

@ -1450,9 +1450,14 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
REG_WRITE(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
if (!ah->reset_power_on)
type = ATH9K_RESET_POWER_ON;
switch (type) {
case ATH9K_RESET_POWER_ON:
ret = ath9k_hw_set_reset_power_on(ah);
if (!ret)
ah->reset_power_on = true;
break;
case ATH9K_RESET_WARM:
case ATH9K_RESET_COLD:

View File

@ -741,6 +741,7 @@ struct ath_hw {
u32 rfkill_polarity;
u32 ah_flags;
bool reset_power_on;
bool htc_reset_init;
enum nl80211_iftype opmode;

View File

@ -639,8 +639,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
ath_err(common,
"Unable to reset hardware; reset status %d (freq %u MHz)\n",
r, curchan->center_freq);
spin_unlock_bh(&sc->sc_pcu_lock);
goto mutex_unlock;
ah->reset_power_on = false;
}
/* Setup our intr mask. */
@ -665,11 +664,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
clear_bit(SC_OP_INVALID, &sc->sc_flags);
sc->sc_ah->is_monitoring = false;
if (!ath_complete_reset(sc, false)) {
r = -EIO;
spin_unlock_bh(&sc->sc_pcu_lock);
goto mutex_unlock;
}
if (!ath_complete_reset(sc, false))
ah->reset_power_on = false;
if (ah->led_pin >= 0) {
ath9k_hw_cfg_output(ah, ah->led_pin,
@ -688,12 +684,11 @@ static int ath9k_start(struct ieee80211_hw *hw)
if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
common->bus_ops->extn_synch_en(common);
mutex_unlock:
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
return r;
return 0;
}
static void ath9k_tx(struct ieee80211_hw *hw,
@ -770,7 +765,7 @@ static void ath9k_tx(struct ieee80211_hw *hw,
return;
exit:
dev_kfree_skb_any(skb);
ieee80211_free_txskb(hw, skb);
}
static void ath9k_stop(struct ieee80211_hw *hw)

View File

@ -324,6 +324,10 @@ static int ath_pci_suspend(struct device *device)
static int ath_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
u32 val;
/*
@ -335,6 +339,9 @@ static int ath_pci_resume(struct device *device)
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
ath_pci_aspm_init(common);
ah->reset_power_on = false;
return 0;
}

View File

@ -66,8 +66,7 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
struct sk_buff *skb,
bool dequeue);
struct sk_buff *skb);
enum {
MCS_HT20,
@ -176,7 +175,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
fi = get_frame_info(skb);
bf = fi->bf;
if (bf && fi->retries) {
if (!bf) {
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
if (!bf) {
ieee80211_free_txskb(sc->hw, skb);
continue;
}
}
if (fi->retries) {
list_add_tail(&bf->list, &bf_head);
ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
@ -785,10 +792,13 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
fi = get_frame_info(skb);
bf = fi->bf;
if (!fi->bf)
bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
if (!bf)
if (!bf) {
__skb_unlink(skb, &tid->buf_q);
ieee80211_free_txskb(sc->hw, skb);
continue;
}
bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
seqno = bf->bf_state.seqno;
@ -1731,9 +1741,11 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
return;
}
bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
if (!bf)
bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
if (!bf) {
ieee80211_free_txskb(sc->hw, skb);
return;
}
bf->bf_state.bf_type = BUF_AMPDU;
INIT_LIST_HEAD(&bf_head);
@ -1757,11 +1769,6 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf;
bf = fi->bf;
if (!bf)
bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
if (!bf)
return;
INIT_LIST_HEAD(&bf_head);
list_add_tail(&bf->list, &bf_head);
@ -1839,8 +1846,7 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
struct sk_buff *skb,
bool dequeue)
struct sk_buff *skb)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_frame_info *fi = get_frame_info(skb);
@ -1852,7 +1858,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
bf = ath_tx_get_buffer(sc);
if (!bf) {
ath_dbg(common, XMIT, "TX buffers are full\n");
goto error;
return NULL;
}
ATH_TXBUF_RESET(bf);
@ -1881,18 +1887,12 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
ath_err(ath9k_hw_common(sc->sc_ah),
"dma_mapping_error() on TX\n");
ath_tx_return_buffer(sc, bf);
goto error;
return NULL;
}
fi->bf = bf;
return bf;
error:
if (dequeue)
__skb_unlink(skb, &tid->buf_q);
dev_kfree_skb_any(skb);
return NULL;
}
/* FIXME: tx power */
@ -1921,9 +1921,14 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
*/
ath_tx_send_ampdu(sc, tid, skb, txctl);
} else {
bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
if (!bf)
bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
if (!bf) {
if (txctl->paprd)
dev_kfree_skb_any(skb);
else
ieee80211_free_txskb(sc->hw, skb);
return;
}
bf->bf_state.bfs_paprd = txctl->paprd;

View File

@ -303,6 +303,7 @@ struct ar9170 {
unsigned long queue_stop_timeout[__AR9170_NUM_TXQ];
unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ];
bool needs_full_reset;
bool force_usb_reset;
atomic_t pending_restarts;
/* interface mode settings */

View File

@ -465,27 +465,26 @@ static void carl9170_restart_work(struct work_struct *work)
{
struct ar9170 *ar = container_of(work, struct ar9170,
restart_work);
int err;
int err = -EIO;
ar->usedkeys = 0;
ar->filter_state = 0;
carl9170_cancel_worker(ar);
mutex_lock(&ar->mutex);
err = carl9170_usb_restart(ar);
if (net_ratelimit()) {
if (err) {
dev_err(&ar->udev->dev, "Failed to restart device "
" (%d).\n", err);
} else {
dev_info(&ar->udev->dev, "device restarted "
"successfully.\n");
if (!ar->force_usb_reset) {
err = carl9170_usb_restart(ar);
if (net_ratelimit()) {
if (err)
dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);
else
dev_info(&ar->udev->dev, "device restarted successfully.\n");
}
}
carl9170_zap_queues(ar);
mutex_unlock(&ar->mutex);
if (!err) {
if (!err && !ar->force_usb_reset) {
ar->restart_counter++;
atomic_set(&ar->pending_restarts, 0);
@ -526,10 +525,10 @@ void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
if (!ar->registered)
return;
if (IS_ACCEPTING_CMD(ar) && !ar->needs_full_reset)
ieee80211_queue_work(ar->hw, &ar->restart_work);
else
carl9170_usb_reset(ar);
if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
ar->force_usb_reset = true;
ieee80211_queue_work(ar->hw, &ar->restart_work);
/*
* At this point, the device instance might have vanished/disabled.

View File

@ -1596,8 +1596,9 @@ done:
}
}
if (mwifiex_bss_start(priv, bss, &req_ssid))
return -EFAULT;
ret = mwifiex_bss_start(priv, bss, &req_ssid);
if (ret)
return ret;
if (mode == NL80211_IFTYPE_ADHOC) {
/* Inform the BSS information to kernel, otherwise
@ -1652,9 +1653,19 @@ done:
"info: association to bssid %pM failed\n",
priv->cfg_bssid);
memset(priv->cfg_bssid, 0, ETH_ALEN);
if (ret > 0)
cfg80211_connect_result(priv->netdev, priv->cfg_bssid,
NULL, 0, NULL, 0, ret,
GFP_KERNEL);
else
cfg80211_connect_result(priv->netdev, priv->cfg_bssid,
NULL, 0, NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
}
return ret;
return 0;
}
/*
@ -1802,7 +1813,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
{
struct net_device *dev = request->wdev->netdev;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
int i, offset;
int i, offset, ret;
struct ieee80211_channel *chan;
struct ieee_types_header *ie;
@ -1855,8 +1866,12 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
priv->user_scan_cfg->chan_list[i].scan_time = 0;
}
if (mwifiex_scan_networks(priv, priv->user_scan_cfg))
return -EFAULT;
ret = mwifiex_scan_networks(priv, priv->user_scan_cfg);
if (ret) {
dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
return ret;
}
if (request->ie && request->ie_len) {
for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) {

View File

@ -1180,16 +1180,18 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct host_cmd_ds_802_11_ad_hoc_result *adhoc_result;
struct mwifiex_bssdescriptor *bss_desc;
u16 reason_code;
adhoc_result = &resp->params.adhoc_result;
bss_desc = priv->attempted_bss_desc;
/* Join result code 0 --> SUCCESS */
if (le16_to_cpu(resp->result)) {
reason_code = le16_to_cpu(resp->result);
if (reason_code) {
dev_err(priv->adapter->dev, "ADHOC_RESP: failed\n");
if (priv->media_connected)
mwifiex_reset_connect_state(priv);
mwifiex_reset_connect_state(priv, reason_code);
memset(&priv->curr_bss_params.bss_descriptor,
0x00, sizeof(struct mwifiex_bssdescriptor));

View File

@ -847,7 +847,7 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc);
int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
void mwifiex_reset_connect_state(struct mwifiex_private *priv);
void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason);
u8 mwifiex_band_to_radio_type(u8 band);
int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac);
int mwifiex_adhoc_start(struct mwifiex_private *priv,

View File

@ -1296,7 +1296,7 @@ mwifiex_radio_type_to_band(u8 radio_type)
int mwifiex_scan_networks(struct mwifiex_private *priv,
const struct mwifiex_user_scan_cfg *user_scan_in)
{
int ret = 0;
int ret;
struct mwifiex_adapter *adapter = priv->adapter;
struct cmd_ctrl_node *cmd_node;
union mwifiex_scan_cmd_config_tlv *scan_cfg_out;
@ -1309,25 +1309,26 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
unsigned long flags;
if (adapter->scan_processing) {
dev_dbg(adapter->dev, "cmd: Scan already in process...\n");
return ret;
dev_err(adapter->dev, "cmd: Scan already in process...\n");
return -EBUSY;
}
if (priv->scan_block) {
dev_err(adapter->dev,
"cmd: Scan is blocked during association...\n");
return -EBUSY;
}
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->scan_processing = true;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
if (priv->scan_block) {
dev_dbg(adapter->dev,
"cmd: Scan is blocked during association...\n");
return ret;
}
scan_cfg_out = kzalloc(sizeof(union mwifiex_scan_cmd_config_tlv),
GFP_KERNEL);
if (!scan_cfg_out) {
dev_err(adapter->dev, "failed to alloc scan_cfg_out\n");
return -ENOMEM;
ret = -ENOMEM;
goto done;
}
buf_size = sizeof(struct mwifiex_chan_scan_param_set) *
@ -1336,7 +1337,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
if (!scan_chan_list) {
dev_err(adapter->dev, "failed to alloc scan_chan_list\n");
kfree(scan_cfg_out);
return -ENOMEM;
ret = -ENOMEM;
goto done;
}
mwifiex_config_scan(priv, user_scan_in, &scan_cfg_out->config,
@ -1364,14 +1366,16 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);
}
} else {
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->scan_processing = true;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
}
kfree(scan_cfg_out);
kfree(scan_chan_list);
done:
if (ret) {
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
}
return ret;
}
@ -1430,8 +1434,8 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
ret = mwifiex_is_network_compatible(priv, bss_desc,
priv->bss_mode);
if (ret)
dev_err(priv->adapter->dev, "cannot find ssid "
"%s\n", bss_desc->ssid.ssid);
dev_err(priv->adapter->dev,
"Incompatible network settings\n");
break;
default:
ret = 0;

View File

@ -545,7 +545,7 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv,
if (!memcmp(resp->params.deauth.mac_addr,
&priv->curr_bss_params.bss_descriptor.mac_address,
sizeof(resp->params.deauth.mac_addr)))
mwifiex_reset_connect_state(priv);
mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING);
return 0;
}
@ -558,7 +558,7 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv,
static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
mwifiex_reset_connect_state(priv);
mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING);
return 0;
}

View File

@ -41,7 +41,7 @@
* - Sends a disconnect event to upper layers/applications.
*/
void
mwifiex_reset_connect_state(struct mwifiex_private *priv)
mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
{
struct mwifiex_adapter *adapter = priv->adapter;
@ -117,10 +117,10 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
priv->media_connected = false;
dev_dbg(adapter->dev,
"info: successfully disconnected from %pM: reason code %d\n",
priv->cfg_bssid, WLAN_REASON_DEAUTH_LEAVING);
priv->cfg_bssid, reason_code);
if (priv->bss_mode == NL80211_IFTYPE_STATION) {
cfg80211_disconnected(priv->netdev, WLAN_REASON_DEAUTH_LEAVING,
NULL, 0, GFP_KERNEL);
cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
GFP_KERNEL);
}
memset(priv->cfg_bssid, 0, ETH_ALEN);
@ -186,7 +186,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
struct mwifiex_adapter *adapter = priv->adapter;
int ret = 0;
u32 eventcause = adapter->event_cause;
u16 ctrl;
u16 ctrl, reason_code;
switch (eventcause) {
case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
@ -204,22 +204,31 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_DEAUTHENTICATED:
dev_dbg(adapter->dev, "event: Deauthenticated\n");
adapter->dbg.num_event_deauth++;
if (priv->media_connected)
mwifiex_reset_connect_state(priv);
if (priv->media_connected) {
reason_code =
le16_to_cpu(*(__le16 *)adapter->event_body);
mwifiex_reset_connect_state(priv, reason_code);
}
break;
case EVENT_DISASSOCIATED:
dev_dbg(adapter->dev, "event: Disassociated\n");
adapter->dbg.num_event_disassoc++;
if (priv->media_connected)
mwifiex_reset_connect_state(priv);
if (priv->media_connected) {
reason_code =
le16_to_cpu(*(__le16 *)adapter->event_body);
mwifiex_reset_connect_state(priv, reason_code);
}
break;
case EVENT_LINK_LOST:
dev_dbg(adapter->dev, "event: Link lost\n");
adapter->dbg.num_event_link_lost++;
if (priv->media_connected)
mwifiex_reset_connect_state(priv);
if (priv->media_connected) {
reason_code =
le16_to_cpu(*(__le16 *)adapter->event_body);
mwifiex_reset_connect_state(priv, reason_code);
}
break;
case EVENT_PS_SLEEP:

View File

@ -2252,9 +2252,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
*/
if (rt2x00_rt(rt2x00dev, RT3352)) {
rt2800_bbp_write(rt2x00dev, 27, 0x0);
rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 27, 0x20);
rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
} else {
rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);

View File

@ -335,21 +335,35 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
unsigned long bytes;
offset &= ~PAGE_MASK;
while (size > 0) {
BUG_ON(offset >= PAGE_SIZE);
BUG_ON(copy_off > MAX_BUFFER_OFFSET);
if (start_new_rx_buffer(copy_off, size, 0)) {
bytes = PAGE_SIZE - offset;
if (bytes > size)
bytes = size;
if (start_new_rx_buffer(copy_off, bytes, 0)) {
count++;
copy_off = 0;
}
bytes = size;
if (copy_off + bytes > MAX_BUFFER_OFFSET)
bytes = MAX_BUFFER_OFFSET - copy_off;
copy_off += bytes;
offset += bytes;
size -= bytes;
if (offset == PAGE_SIZE)
offset = 0;
}
}
return count;
@ -403,14 +417,24 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
unsigned long bytes;
/* Data must not cross a page boundary. */
BUG_ON(size + offset > PAGE_SIZE);
BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
meta = npo->meta + npo->meta_prod - 1;
/* Skip unused frames from start of page */
page += offset >> PAGE_SHIFT;
offset &= ~PAGE_MASK;
while (size > 0) {
BUG_ON(offset >= PAGE_SIZE);
BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
if (start_new_rx_buffer(npo->copy_off, size, *head)) {
bytes = PAGE_SIZE - offset;
if (bytes > size)
bytes = size;
if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
/*
* Netfront requires there to be some data in the head
* buffer.
@ -420,7 +444,6 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
meta = get_next_rx_buffer(vif, npo);
}
bytes = size;
if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
bytes = MAX_BUFFER_OFFSET - npo->copy_off;
@ -453,6 +476,13 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
offset += bytes;
size -= bytes;
/* Next frame */
if (offset == PAGE_SIZE && size) {
BUG_ON(!PageCompound(page));
page++;
offset = 0;
}
/* Leave a gap for the GSO descriptor. */
if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
vif->rx.req_cons++;

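The xen-netback change above stops assuming that a frag fits in a single page: with compound pages a fragment may span several pages, so each copy chunk is first clamped to the end of the current page and the loop then advances to the next page. A standalone sketch of that clamping walk, with hypothetical names and a 4 KiB page size assumed:

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

/* Walk a fragment of `size` bytes starting at `offset` within its first
 * page, emitting one chunk per page, as the copy loop must do for
 * compound (multi-page) fragments.
 */
static void demo_walk_frag(unsigned long offset, unsigned long size)
{
	unsigned long page = offset / DEMO_PAGE_SIZE;	/* skip whole pages */

	offset %= DEMO_PAGE_SIZE;

	while (size > 0) {
		unsigned long bytes = DEMO_PAGE_SIZE - offset;

		if (bytes > size)
			bytes = size;

		printf("copy %5lu bytes from page %lu offset %lu\n",
		       bytes, page, offset);

		offset += bytes;
		size -= bytes;

		/* Move on to the next page of the compound allocation. */
		if (offset == DEMO_PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
}

int main(void)
{
	/* A 9000-byte fragment starting 100 bytes into its first page
	 * spans three pages and therefore needs three copy operations.
	 */
	demo_walk_frag(100, 9000);
	return 0;
}
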
View File

@ -1551,6 +1551,9 @@ static const struct usb_device_id acm_ids[] = {
Maybe we should define a new
quirk for this. */
},
{ USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
.driver_info = NO_UNION_NORMAL,
},
{ USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},

View File

@ -1,5 +0,0 @@
header-y += raw.h
header-y += bcm.h
header-y += gw.h
header-y += error.h
header-y += netlink.h

View File

@ -284,10 +284,16 @@ enum {
IFLA_VXLAN_LEARNING,
IFLA_VXLAN_AGEING,
IFLA_VXLAN_LIMIT,
IFLA_VXLAN_PORT_RANGE,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
struct ifla_vxlan_port_range {
__be16 low;
__be16 high;
};
/* SR-IOV virtual function management section */
enum {

View File

@ -26,32 +26,32 @@ extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
#else /* CONFIG_OF */
int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
{
return -ENOSYS;
}
struct phy_device *of_phy_find_device(struct device_node *phy_np)
static inline struct phy_device *of_phy_find_device(struct device_node *phy_np)
{
return NULL;
}
struct phy_device *of_phy_connect(struct net_device *dev,
struct device_node *phy_np,
void (*hndlr)(struct net_device *),
u32 flags, phy_interface_t iface)
static inline struct phy_device *of_phy_connect(struct net_device *dev,
struct device_node *phy_np,
void (*hndlr)(struct net_device *),
u32 flags, phy_interface_t iface)
{
return NULL;
}
struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
void (*hndlr)(struct net_device *),
phy_interface_t iface)
static inline struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
void (*hndlr)(struct net_device *),
phy_interface_t iface)
{
return NULL;
}
struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
{
return NULL;
}

View File

@ -68,6 +68,7 @@ struct usbnet {
# define EVENT_RX_PAUSED 5
# define EVENT_DEV_ASLEEP 6
# define EVENT_DEV_OPEN 7
# define EVENT_DEVICE_REPORT_IDLE 8
};
static inline struct usb_driver *driver_of(struct usb_interface *intf)
@ -160,6 +161,7 @@ extern int usbnet_probe(struct usb_interface *, const struct usb_device_id *);
extern int usbnet_suspend(struct usb_interface *, pm_message_t);
extern int usbnet_resume(struct usb_interface *);
extern void usbnet_disconnect(struct usb_interface *);
extern void usbnet_device_suggests_idle(struct usbnet *dev);
/* Drivers that reuse some of the standard USB CDC infrastructure

View File

@ -1 +0,0 @@
header-y += i2400m.h

View File

@ -1 +1,6 @@
# UAPI Header export list
header-y += bcm.h
header-y += error.h
header-y += gw.h
header-y += netlink.h
header-y += raw.h

View File

@ -1 +1,2 @@
# UAPI Header export list
header-y += i2400m.h

View File

@ -265,6 +265,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
struct net_device *dev = skb->dev;
u32 len;
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto inhdr_error;
iph = ip_hdr(skb);
opt = &(IPCB(skb)->opt);

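The bridge fix above follows the usual pattern for reading an IP header out of an skb: make sure the bytes are actually present in the linear area with pskb_may_pull() before dereferencing ip_hdr(), then pull again once the real header length (with options) is known. A kernel-context sketch of that pattern, simplified and not the bridge function itself:

#include <linux/skbuff.h>
#include <linux/ip.h>

/* Sketch only: "pull before you peek" for IPv4 headers. */
static int demo_parse_ip(struct sk_buff *skb)
{
	const struct iphdr *iph;

	/* Make sure at least a minimal IPv4 header is in the linear data. */
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return -1;

	iph = ip_hdr(skb);

	/* A header with options is longer; pull the rest before using it. */
	if (iph->ihl < 5 || !pskb_may_pull(skb, iph->ihl * 4))
		return -1;

	iph = ip_hdr(skb);	/* pskb_may_pull() may have moved the data */
	return 0;
}
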
View File

@ -248,8 +248,8 @@ struct pktgen_dev {
int removal_mark; /* non-zero => the device is marked for
* removal by worker thread */
int min_pkt_size; /* = ETH_ZLEN; */
int max_pkt_size; /* = ETH_ZLEN; */
int min_pkt_size;
int max_pkt_size;
int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */
int nfrags;
struct page *page;
@ -449,8 +449,6 @@ static void pktgen_stop_all_threads_ifs(void);
static void pktgen_stop(struct pktgen_thread *t);
static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
static unsigned int scan_ip6(const char *s, char ip[16]);
/* Module parameters, defaults. */
static int pg_count_d __read_mostly = 1000;
static int pg_delay_d __read_mostly;
@ -702,8 +700,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
&pkt_dev->cur_in6_saddr,
&pkt_dev->cur_in6_daddr);
} else
seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n",
pkt_dev->cur_saddr, pkt_dev->cur_daddr);
seq_printf(seq, " cur_saddr: %pI4 cur_daddr: %pI4\n",
&pkt_dev->cur_saddr, &pkt_dev->cur_daddr);
seq_printf(seq, " cur_udp_dst: %d cur_udp_src: %d\n",
pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src);
@ -1299,7 +1297,7 @@ static ssize_t pktgen_if_write(struct file *file,
return -EFAULT;
buf[len] = 0;
scan_ip6(buf, pkt_dev->in6_daddr.s6_addr);
in6_pton(buf, -1, pkt_dev->in6_daddr.s6_addr, -1, NULL);
snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr);
pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr;
@ -1322,7 +1320,7 @@ static ssize_t pktgen_if_write(struct file *file,
return -EFAULT;
buf[len] = 0;
scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr);
in6_pton(buf, -1, pkt_dev->min_in6_daddr.s6_addr, -1, NULL);
snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr);
pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr;
@ -1344,7 +1342,7 @@ static ssize_t pktgen_if_write(struct file *file,
return -EFAULT;
buf[len] = 0;
scan_ip6(buf, pkt_dev->max_in6_daddr.s6_addr);
in6_pton(buf, -1, pkt_dev->max_in6_daddr.s6_addr, -1, NULL);
snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr);
if (debug)
@ -1365,7 +1363,7 @@ static ssize_t pktgen_if_write(struct file *file,
return -EFAULT;
buf[len] = 0;
scan_ip6(buf, pkt_dev->in6_saddr.s6_addr);
in6_pton(buf, -1, pkt_dev->in6_saddr.s6_addr, -1, NULL);
snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr);
pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr;
@ -2036,19 +2034,17 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
/* Set up Dest MAC */
memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
/* Set up pkt size */
pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
if (pkt_dev->flags & F_IPV6) {
/*
* Skip this automatic address setting until locks or functions
* gets exported
*/
#ifdef NOTNOW
int i, set = 0, err = 1;
struct inet6_dev *idev;
if (pkt_dev->min_pkt_size == 0) {
pkt_dev->min_pkt_size = 14 + sizeof(struct ipv6hdr)
+ sizeof(struct udphdr)
+ sizeof(struct pktgen_hdr)
+ pkt_dev->pkt_overhead;
}
for (i = 0; i < IN6_ADDR_HSIZE; i++)
if (pkt_dev->cur_in6_saddr.s6_addr[i]) {
set = 1;
@ -2069,9 +2065,8 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
struct inet6_ifaddr *ifp;
read_lock_bh(&idev->lock);
for (ifp = idev->addr_list; ifp;
ifp = ifp->if_next) {
if (ifp->scope == IFA_LINK &&
list_for_each_entry(ifp, &idev->addr_list, if_list) {
if ((ifp->scope & IFA_LINK) &&
!(ifp->flags & IFA_F_TENTATIVE)) {
pkt_dev->cur_in6_saddr = ifp->addr;
err = 0;
@ -2084,8 +2079,14 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
if (err)
pr_err("ERROR: IPv6 link address not available\n");
}
#endif
} else {
if (pkt_dev->min_pkt_size == 0) {
pkt_dev->min_pkt_size = 14 + sizeof(struct iphdr)
+ sizeof(struct udphdr)
+ sizeof(struct pktgen_hdr)
+ pkt_dev->pkt_overhead;
}
pkt_dev->saddr_min = 0;
pkt_dev->saddr_max = 0;
if (strlen(pkt_dev->src_min) == 0) {
@ -2111,6 +2112,10 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
}
/* Initialize current values. */
pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
if (pkt_dev->min_pkt_size > pkt_dev->max_pkt_size)
pkt_dev->max_pkt_size = pkt_dev->min_pkt_size;
pkt_dev->cur_dst_mac_offset = 0;
pkt_dev->cur_src_mac_offset = 0;
pkt_dev->cur_saddr = pkt_dev->saddr_min;
@ -2758,97 +2763,6 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
return skb;
}
/*
* scan_ip6, fmt_ip taken from dietlibc-0.21
* Author Felix von Leitner <felix-dietlibc@fefe.de>
*
* Slightly modified for kernel.
* Should be candidate for net/ipv4/utils.c
* --ro
*/
static unsigned int scan_ip6(const char *s, char ip[16])
{
unsigned int i;
unsigned int len = 0;
unsigned long u;
char suffix[16];
unsigned int prefixlen = 0;
unsigned int suffixlen = 0;
__be32 tmp;
char *pos;
for (i = 0; i < 16; i++)
ip[i] = 0;
for (;;) {
if (*s == ':') {
len++;
if (s[1] == ':') { /* Found "::", skip to part 2 */
s += 2;
len++;
break;
}
s++;
}
u = simple_strtoul(s, &pos, 16);
i = pos - s;
if (!i)
return 0;
if (prefixlen == 12 && s[i] == '.') {
/* the last 4 bytes may be written as IPv4 address */
tmp = in_aton(s);
memcpy((struct in_addr *)(ip + 12), &tmp, sizeof(tmp));
return i + len;
}
ip[prefixlen++] = (u >> 8);
ip[prefixlen++] = (u & 255);
s += i;
len += i;
if (prefixlen == 16)
return len;
}
/* part 2, after "::" */
for (;;) {
if (*s == ':') {
if (suffixlen == 0)
break;
s++;
len++;
} else if (suffixlen != 0)
break;
u = simple_strtol(s, &pos, 16);
i = pos - s;
if (!i) {
if (*s)
len--;
break;
}
if (suffixlen + prefixlen <= 12 && s[i] == '.') {
tmp = in_aton(s);
memcpy((struct in_addr *)(suffix + suffixlen), &tmp,
sizeof(tmp));
suffixlen += 4;
len += strlen(s);
break;
}
suffix[suffixlen++] = (u >> 8);
suffix[suffixlen++] = (u & 255);
s += i;
len += i;
if (prefixlen + suffixlen == 16)
break;
}
for (i = 0; i < suffixlen; i++)
ip[16 - suffixlen + i] = suffix[i];
return len;
}
static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
struct pktgen_dev *pkt_dev)
{
@ -2927,7 +2841,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
pkt_dev->pkt_overhead;
if (datalen < sizeof(struct pktgen_hdr)) {
if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) {
datalen = sizeof(struct pktgen_hdr);
net_info_ratelimited("increased datalen to %d\n", datalen);
}
@ -3548,8 +3462,6 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
}
pkt_dev->removal_mark = 0;
pkt_dev->min_pkt_size = ETH_ZLEN;
pkt_dev->max_pkt_size = ETH_ZLEN;
pkt_dev->nfrags = 0;
pkt_dev->delay = pg_delay_d;
pkt_dev->count = pg_count_d;

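With the pktgen changes above, min_pkt_size and max_pkt_size are no longer forced to ETH_ZLEN when a device is added; if left at zero they are derived from the headers a generated packet must carry. A small arithmetic sketch of those floors, assuming the usual sizes (Ethernet 14, IPv4 20, IPv6 40, UDP 8, pktgen header 16 bytes) and no extra MPLS/VLAN/IPsec overhead:

#include <stdio.h>

int main(void)
{
	const int eth = 14, iph = 20, ip6h = 40, udph = 8, pgh = 16;
	const int overhead = 0;	/* pkt_overhead: MPLS, VLANs, IPSEC etc. */

	/* Smallest frame pktgen can emit once the headers are accounted for. */
	printf("IPv4 floor: %d bytes\n", eth + iph + udph + pgh + overhead);	/* 58 */
	printf("IPv6 floor: %d bytes\n", eth + ip6h + udph + pgh + overhead);	/* 78 */
	return 0;
}
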
View File

@ -107,6 +107,18 @@ static inline int xdigit2bin(char c, int delim)
return IN6PTON_UNKNOWN;
}
/**
* in4_pton - convert an IPv4 address from literal to binary representation
* @src: the start of the IPv4 address string
* @srclen: the length of the string, -1 means strlen(src)
* @dst: the binary (u8[4] array) representation of the IPv4 address
* @delim: the delimiter of the IPv4 address in @src, -1 means no delimiter
* @end: A pointer to the end of the parsed string will be placed here
*
* Return one on success, return zero when any error occurs
* and @end will point to the end of the parsed string.
*
*/
int in4_pton(const char *src, int srclen,
u8 *dst,
int delim, const char **end)
@ -161,6 +173,18 @@ out:
}
EXPORT_SYMBOL(in4_pton);
/**
* in6_pton - convert an IPv6 address from literal to binary representation
* @src: the start of the IPv6 address string
* @srclen: the length of the string, -1 means strlen(src)
* @dst: the binary (u8[16] array) representation of the IPv6 address
* @delim: the delimiter of the IPv6 address in @src, -1 means no delimiter
* @end: A pointer to the end of the parsed string will be placed here
*
* Return one on success, return zero when any error occurs
* and @end will point to the end of the parsed string.
*
*/
int in6_pton(const char *src, int srclen,
u8 *dst,
int delim, const char **end)

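The kernel-doc added above documents the existing helpers that pktgen now uses instead of its private scan_ip6(). A short kernel-context usage sketch, with hypothetical literal addresses:

#include <linux/inet.h>
#include <linux/kernel.h>

/* Sketch: parse literal addresses the way pktgen now does. */
static void demo_parse_addrs(void)
{
	u8 v4[4], v6[16];
	const char *end;

	/* -1 length means "use strlen()", -1 delim means "no delimiter". */
	if (in4_pton("192.168.0.1", -1, v4, -1, &end))
		pr_info("parsed IPv4 %pI4\n", v4);

	if (in6_pton("2001:db8::1", -1, v6, -1, &end))
		pr_info("parsed IPv6 %pI6c\n", v6);
}
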
View File

@ -374,7 +374,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
memset(&fl4, 0, sizeof(fl4));
flowi4_init_output(&fl4, tunnel->parms.link,
htonl(tunnel->parms.i_key), RT_TOS(tos),
be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos),
RT_SCOPE_UNIVERSE,
IPPROTO_IPIP, 0,
dst, tiph->saddr, 0, 0);
@ -441,7 +441,7 @@ static int vti_tunnel_bind_dev(struct net_device *dev)
struct flowi4 fl4;
memset(&fl4, 0, sizeof(fl4));
flowi4_init_output(&fl4, tunnel->parms.link,
htonl(tunnel->parms.i_key),
be32_to_cpu(tunnel->parms.i_key),
RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
IPPROTO_IPIP, 0,
iph->daddr, iph->saddr, 0, 0);

View File

@ -2220,7 +2220,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
goto nla_put_failure;
if (fl4->flowi4_mark &&
nla_put_be32(skb, RTA_MARK, fl4->flowi4_mark))
nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
goto nla_put_failure;
error = rt->dst.error;

View File

@ -248,6 +248,8 @@ int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
ctxt = rcu_dereference(tcp_fastopen_ctx);
if (ctxt)
memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
else
memset(user_key, 0, sizeof(user_key));
rcu_read_unlock();
snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",

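The two added lines close a 16-byte infoleak: user_key is a stack array, and when no fastopen context exists the old code formatted whatever happened to be on the stack into the sysctl string returned to userspace. A userspace sketch of the pattern and the fix, with hypothetical names:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>

#define DEMO_KEY_LENGTH 16

/* Stand-in for "copy the fastopen key if one is configured". */
static bool demo_fetch_key(uint32_t key[4])
{
	return false;		/* pretend no key has been set */
}

static void demo_format_key(char *out, size_t len)
{
	uint32_t user_key[DEMO_KEY_LENGTH / sizeof(uint32_t)];

	/* The fix: always initialise the buffer, so the "no key configured"
	 * case reports zeroes instead of stale stack bytes.
	 */
	if (!demo_fetch_key(user_key))
		memset(user_key, 0, sizeof(user_key));

	snprintf(out, len, "%08x-%08x-%08x-%08x",
		 user_key[0], user_key[1], user_key[2], user_key[3]);
}

int main(void)
{
	char buf[64];

	demo_format_key(buf, sizeof(buf));
	puts(buf);	/* 00000000-00000000-00000000-00000000 */
	return 0;
}
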
View File

@ -708,10 +708,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
arg.csumoffset = offsetof(struct tcphdr, check) / 2;
arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
/* When socket is gone, all binding information is lost.
* routing might fail in this case. using iif for oif to
* make sure we can deliver it
* routing might fail in this case. No choice here, if we choose to force
* input interface, we will misroute in case of asymmetric route.
*/
arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
if (sk)
arg.bound_dev_if = sk->sk_bound_dev_if;
net = dev_net(skb_dst(skb)->dev);
arg.tos = ip_hdr(skb)->tos;

View File

@ -877,7 +877,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
fl6.flowi6_proto = IPPROTO_TCP;
fl6.flowi6_oif = inet6_iif(skb);
if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
fl6.flowi6_oif = inet6_iif(skb);
fl6.fl6_dport = t1->dest;
fl6.fl6_sport = t1->source;
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

View File

@ -56,7 +56,6 @@ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
u64 tsfdelta;
spin_lock_bh(&ifmsh->sync_offset_lock);
if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) {
msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting\n",
(long long) ifmsh->sync_offset_clockdrift_max);
@ -69,11 +68,11 @@ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
tsfdelta = -beacon_int_fraction;
ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction;
}
spin_unlock_bh(&ifmsh->sync_offset_lock);
tsf = drv_get_tsf(local, sdata);
if (tsf != -1ULL)
drv_set_tsf(local, sdata, tsf + tsfdelta);
spin_unlock_bh(&ifmsh->sync_offset_lock);
}
static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,

View File

@ -34,7 +34,7 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
skb_queue_len(&local->skb_queue_unreliable);
while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
(skb = skb_dequeue(&local->skb_queue_unreliable))) {
dev_kfree_skb_irq(skb);
ieee80211_free_txskb(hw, skb);
tmp--;
I802_DEBUG_INC(local->tx_status_drop);
}
@ -159,7 +159,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
"dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
skb_queue_len(&sta->tx_filtered[ac]),
!!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
dev_kfree_skb(skb);
ieee80211_free_txskb(&local->hw, skb);
}
static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid)

View File

@ -354,7 +354,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
total += skb_queue_len(&sta->ps_tx_buf[ac]);
if (skb) {
purged++;
dev_kfree_skb(skb);
ieee80211_free_txskb(&local->hw, skb);
break;
}
}
@ -466,7 +466,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
ps_dbg(tx->sdata,
"STA %pM TX buffer for AC %d full - dropping oldest frame\n",
sta->sta.addr, ac);
dev_kfree_skb(old);
ieee80211_free_txskb(&local->hw, old);
} else
tx->local->total_ps_buffered++;
@ -1103,7 +1103,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
spin_unlock(&tx->sta->lock);
if (purge_skb)
dev_kfree_skb(purge_skb);
ieee80211_free_txskb(&tx->local->hw, purge_skb);
}
/* reset session timer */
@ -1214,7 +1214,7 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (WARN_ON_ONCE(q >= local->hw.queues)) {
__skb_unlink(skb, skbs);
dev_kfree_skb(skb);
ieee80211_free_txskb(&local->hw, skb);
continue;
}
#endif
@ -1356,7 +1356,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
if (unlikely(res == TX_DROP)) {
I802_DEBUG_INC(tx->local->tx_handlers_drop);
if (tx->skb)
dev_kfree_skb(tx->skb);
ieee80211_free_txskb(&tx->local->hw, tx->skb);
else
__skb_queue_purge(&tx->skbs);
return -1;
@ -1393,7 +1393,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
res_prepare = ieee80211_tx_prepare(sdata, &tx, skb);
if (unlikely(res_prepare == TX_DROP)) {
dev_kfree_skb(skb);
ieee80211_free_txskb(&local->hw, skb);
goto out;
} else if (unlikely(res_prepare == TX_QUEUED)) {
goto out;
@ -1465,7 +1465,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
headroom = max_t(int, 0, headroom);
if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
dev_kfree_skb(skb);
ieee80211_free_txskb(&local->hw, skb);
rcu_read_unlock();
return;
}
@ -2050,8 +2050,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
head_need += IEEE80211_ENCRYPT_HEADROOM;
head_need += local->tx_headroom;
head_need = max_t(int, 0, head_need);
if (ieee80211_skb_resize(sdata, skb, head_need, true))
goto fail;
if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
ieee80211_free_txskb(&local->hw, skb);
return NETDEV_TX_OK;
}
}
if (encaps_data) {
@ -2184,7 +2186,7 @@ void ieee80211_tx_pending(unsigned long data)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (WARN_ON(!info->control.vif)) {
kfree_skb(skb);
ieee80211_free_txskb(&local->hw, skb);
continue;
}