Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) cfg80211_conn_scan() must be called with the sched_scan_mutex held;
    fix from Artem Savkov.

 2) Fix a regression in TCP ICMPv6 processing: we do not want to treat
    redirects as socket errors.  From Christoph Paasch.

 3) Fix several recvmsg() msg_name kernel memory leaks into userspace,
    in ATM, AX25, Bluetooth, CAIF, IRDA, s390 IUCV, L2TP, LLC, Netrom,
    NFC, Rose, TIPC, and VSOCK; the recurring pattern is sketched just
    after this list.  From Mathias Krause and Wei Yongjun.

 4) Fix AF_IUCV handling of segmented SKBs in recvmsg(), from Ursula
    Braun and Eric Dumazet.

 5) The CAN gw.c code calls kfree() on kmem_cache-allocated memory; use
    kmem_cache_free() instead.  Fix from Wei Yongjun.

 6) Fix LSM regression on TCP SYN/ACKs; some LSMs such as SELinux want
    an skb->sk socket context available for these packets, but nothing
    else requires it.  From Eric Dumazet and Paul Moore.

 7) Fix ipv4 address lifetime processing so that we don't perform
    sleeping operations inside of rcu_read_lock() sections; do them in
    an rtnl_lock() section instead.  From Jiri Pirko.

 8) The mvneta driver accidentally sets HW features after device
    registration; it should do so beforehand.  Fix from Willy Tarreau.

 9) Fix bonding unload races more correctly, from Nikolay Aleksandrov
    and Veaceslav Falico.

10) rtnl_dump_ifinfo() and rtnl_calcit() invoke nlmsg_parse() with the
    wrong header size argument.  Fix from Michael Riesch.
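
As context for #3: a recvmsg() implementation that never fills in
msg_name must still set msg->msg_namelen to 0 before any early return,
otherwise the uninitialized length is copied back to userspace along
with stale kernel bytes.  A minimal sketch of the fixed pattern follows
(hypothetical protocol, not any one of the drivers above):

	#include <linux/net.h>
	#include <linux/skbuff.h>
	#include <net/sock.h>

	static int example_recvmsg(struct kiocb *iocb, struct socket *sock,
				   struct msghdr *msg, size_t len, int flags)
	{
		struct sock *sk = sock->sk;
		struct sk_buff *skb;
		int err, copied;

		/* This protocol never fills in msg_name, so zero the
		 * length up front; every early return below would
		 * otherwise leak the caller's stale value. */
		msg->msg_namelen = 0;

		skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
		if (!skb)
			return err;

		copied = min_t(unsigned int, skb->len, len);
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
		skb_free_datagram(sk, skb);

		return err ? err : copied;
	}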

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (44 commits)
  lsm: add the missing documentation for the security_skb_owned_by() hook
  bnx2x: Prevent null pointer dereference in AFEX mode
  e100: Add dma mapping error check
  selinux: add a skb_owned_by() hook
  can: gw: use kmem_cache_free() instead of kfree()
  netrom: fix invalid use of sizeof in nr_recvmsg()
  qeth: fix qeth_wait_for_threads() deadlock for OSN devices
  af_iucv: fix recvmsg by replacing skb_pull() function
  rtnetlink: Call nlmsg_parse() with correct header length
  bonding: fix bonding_masters race condition in bond unloading
  Revert "bonding: remove sysfs before removing devices"
  net: mvneta: enable features before registering the driver
  hyperv: Fix RNDIS send_completion code path
  hyperv: Fix a kernel warning from netvsc_linkstatus_callback()
  net: ipv4: fix schedule while atomic bug in check_lifetime()
  net: ipv4: reset check_lifetime_work after changing lifetime
  bnx2x: Fix KR2 rapid link flap
  sctp: remove 'sridhar' from maintainers list
  VSOCK: Fix missing msg_namelen update in vsock_stream_recvmsg()
  VSOCK: vmci - fix possible info leak in vmci_transport_dgram_dequeue()
  ...
Committed by Linus Torvalds on 2013-04-10 14:15:27 -07:00 as commit
fe2971a017.  54 changed files with 619 additions and 377 deletions.


@ -6951,7 +6951,6 @@ F: drivers/scsi/st*
SCTP PROTOCOL
M: Vlad Yasevich <vyasevich@gmail.com>
M: Sridhar Samudrala <sri@us.ibm.com>
M: Neil Horman <nhorman@tuxdriver.com>
L: linux-sctp@vger.kernel.org
W: http://lksctp.sourceforge.net


@ -4846,9 +4846,18 @@ static int __net_init bond_net_init(struct net *net)
static void __net_exit bond_net_exit(struct net *net)
{
struct bond_net *bn = net_generic(net, bond_net_id);
struct bonding *bond, *tmp_bond;
LIST_HEAD(list);
bond_destroy_sysfs(bn);
bond_destroy_proc_dir(bn);
/* Kill off any bonds created after unregistering bond rtnl ops */
rtnl_lock();
list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
unregister_netdevice_queue(bond->dev, &list);
unregister_netdevice_many(&list);
rtnl_unlock();
}
static struct pernet_operations bond_net_ops = {
@ -4902,8 +4911,8 @@ static void __exit bonding_exit(void)
bond_destroy_debugfs();
unregister_pernet_subsys(&bond_net_ops);
rtnl_link_unregister(&bond_link_ops);
unregister_pernet_subsys(&bond_net_ops);
#ifdef CONFIG_NET_POLL_CONTROLLER
/*

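For reference, the bond_net_exit() hunk above uses the standard batched
netdevice teardown idiom: queue every victim on a local list and commit
them with one rtnl-protected unregister_netdevice_many() call, which
also sweeps up devices that raced in during module unload.  A minimal
sketch with hypothetical names (example_priv/example_dev_list are not
bonding code):

	#include <linux/list.h>
	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>

	struct example_priv {
		struct net_device *dev;
		struct list_head list;
	};

	static LIST_HEAD(example_dev_list);	/* devices owned by this module */

	static void example_destroy_all(void)
	{
		struct example_priv *priv, *tmp;
		LIST_HEAD(kill_list);

		/* Batch the unregistrations: one rtnl hold and one
		 * notifier pass, instead of one per device. */
		rtnl_lock();
		list_for_each_entry_safe(priv, tmp, &example_dev_list, list)
			unregister_netdevice_queue(priv->dev, &kill_list);
		unregister_netdevice_many(&kill_list);
		rtnl_unlock();
	}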

@ -13437,13 +13437,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
{
struct bnx2x *bp = params->bp;
u16 base_page, next_page, not_kr2_device, lane;
int sigdet = bnx2x_warpcore_get_sigdet(phy, params);
if (!sigdet) {
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
bnx2x_kr2_recovery(params, vars, phy);
return;
}
int sigdet;
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
* since some switches tend to reinit the AN process and clear the
@ -13454,6 +13448,16 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
vars->check_kr2_recovery_cnt--;
return;
}
sigdet = bnx2x_warpcore_get_sigdet(phy, params);
if (!sigdet) {
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
bnx2x_kr2_recovery(params, vars, phy);
DP(NETIF_MSG_LINK, "No sigdet\n");
}
return;
}
lane = bnx2x_get_warpcore_lane(phy, params);
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, lane);


@ -4947,7 +4947,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
q);
}
if (!NO_FCOE(bp)) {
if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
fp = &bp->fp[FCOE_IDX(bp)];
queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
@ -13354,6 +13354,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
RCU_INIT_POINTER(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_mutex);
synchronize_rcu();
bp->cnic_enabled = false;
kfree(bp->cnic_kwq);
bp->cnic_kwq = NULL;


@ -870,7 +870,7 @@ err_unlock:
}
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
struct cb *cb;
unsigned long flags;
@ -888,10 +888,13 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
nic->cbs_avail--;
cb->skb = skb;
err = cb_prepare(nic, cb, skb);
if (err)
goto err_unlock;
if (unlikely(!nic->cbs_avail))
err = -ENOSPC;
cb_prepare(nic, cb, skb);
/* Order is important otherwise we'll be in a race with h/w:
* set S-bit in current first, then clear S-bit in previous. */
@ -1091,7 +1094,7 @@ static void e100_get_defaults(struct nic *nic)
nic->mii.mdio_write = mdio_write;
}
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct config *config = &cb->u.config;
u8 *c = (u8 *)config;
@ -1181,6 +1184,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
"[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
return 0;
}
/*************************************************************************
@ -1331,7 +1335,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
return fw;
}
static void e100_setup_ucode(struct nic *nic, struct cb *cb,
static int e100_setup_ucode(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
const struct firmware *fw = (void *)skb;
@ -1358,6 +1362,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb,
cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
cb->command = cpu_to_le16(cb_ucode | cb_el);
return 0;
}
static inline int e100_load_ucode_wait(struct nic *nic)
@ -1400,18 +1405,20 @@ static inline int e100_load_ucode_wait(struct nic *nic)
return err;
}
static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
cb->command = cpu_to_le16(cb_iaaddr);
memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
return 0;
}
static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
cb->command = cpu_to_le16(cb_dump);
cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
offsetof(struct mem, dump_buf));
return 0;
}
static int e100_phy_check_without_mii(struct nic *nic)
@ -1581,7 +1588,7 @@ static int e100_hw_init(struct nic *nic)
return 0;
}
static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct net_device *netdev = nic->netdev;
struct netdev_hw_addr *ha;
@ -1596,6 +1603,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
ETH_ALEN);
}
return 0;
}
static void e100_set_multicast_list(struct net_device *netdev)
@ -1756,11 +1764,18 @@ static void e100_watchdog(unsigned long data)
round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
dma_addr_t dma_addr;
cb->command = nic->tx_command;
dma_addr = pci_map_single(nic->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE);
/* If we can't map the skb, have the upper layer try later */
if (pci_dma_mapping_error(nic->pdev, dma_addr))
return -ENOMEM;
/*
* Use the last 4 bytes of the SKB payload packet as the CRC, used for
* testing, ie sending frames with bad CRC.
@ -1777,11 +1792,10 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
cb->u.tcb.tcb_byte_count = 0;
cb->u.tcb.threshold = nic->tx_threshold;
cb->u.tcb.tbd_count = 1;
cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE));
/* check for mapping failure? */
cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
skb_tx_timestamp(skb);
return 0;
}
static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,

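The e100 hunks above change the cb_prepare callbacks from void to int
so a failed DMA mapping can be propagated instead of handing the
hardware a bogus address.  The core check, as a minimal sketch against
the legacy pci_* DMA API used in this driver (example_nic is
hypothetical):

	#include <linux/pci.h>
	#include <linux/skbuff.h>

	struct example_nic {
		struct pci_dev *pdev;
	};

	static int example_map_tx(struct example_nic *nic, struct sk_buff *skb,
				  dma_addr_t *mapped)
	{
		dma_addr_t dma_addr;

		dma_addr = pci_map_single(nic->pdev, skb->data, skb->len,
					  PCI_DMA_TODEVICE);
		/* Mappings can fail (e.g. IOMMU exhaustion); report it so
		 * the caller can back off rather than DMA to a bad address. */
		if (pci_dma_mapping_error(nic->pdev, dma_addr))
			return -ENOMEM;

		*mapped = dma_addr;
		return 0;
	}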

@ -2771,16 +2771,17 @@ static int mvneta_probe(struct platform_device *pdev)
netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
dev->priv_flags |= IFF_UNICAST_FLT;
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register\n");
goto err_deinit;
}
dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
dev->priv_flags |= IFF_UNICAST_FLT;
netdev_info(dev, "mac: %pM\n", dev->dev_addr);
platform_set_drvdata(pdev, pp->dev);

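Why the mvneta ordering matters: register_netdev() makes the interface
visible to userspace and validates dev->features against
dev->hw_features at registration time, so offload flags assigned after
that point are neither sanitized nor advertised.  A hedged sketch of
the corrected probe order:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	static int example_probe_netdev(void)
	{
		struct net_device *dev;
		int err;

		dev = alloc_etherdev(0);
		if (!dev)
			return -ENOMEM;

		/* Declare offloads before registration so the core can
		 * validate them and expose them via ethtool. */
		dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
		dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
		dev->priv_flags |= IFF_UNICAST_FLT;

		err = register_netdev(dev);	/* device is live from here */
		if (err) {
			free_netdev(dev);
			return err;
		}
		return 0;
	}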

@ -470,8 +470,10 @@ static void netvsc_send_completion(struct hv_device *device,
packet->trans_id;
/* Notify the layer above us */
nvsc_packet->completion.send.send_completion(
nvsc_packet->completion.send.send_completion_ctx);
if (nvsc_packet)
nvsc_packet->completion.send.send_completion(
nvsc_packet->completion.send.
send_completion_ctx);
num_outstanding_sends =
atomic_dec_return(&net_device->num_outstanding_sends);
@ -498,6 +500,7 @@ int netvsc_send(struct hv_device *device,
int ret = 0;
struct nvsp_message sendMessage;
struct net_device *ndev;
u64 req_id;
net_device = get_outbound_net_device(device);
if (!net_device)
@ -518,20 +521,24 @@ int netvsc_send(struct hv_device *device,
0xFFFFFFFF;
sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
if (packet->completion.send.send_completion)
req_id = (u64)packet;
else
req_id = 0;
if (packet->page_buf_cnt) {
ret = vmbus_sendpacket_pagebuffer(device->channel,
packet->page_buf,
packet->page_buf_cnt,
&sendMessage,
sizeof(struct nvsp_message),
(unsigned long)packet);
req_id);
} else {
ret = vmbus_sendpacket(device->channel, &sendMessage,
sizeof(struct nvsp_message),
(unsigned long)packet,
req_id,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
if (ret == 0) {

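The netvsc change above keys the VMBus request id off whether a
completion is expected: the packet pointer is only used as the id when
there is a callback to run, and id 0 tells the completion path there is
nothing to complete.  A sketch of the idiom, with a hypothetical
example_packet type (not the real netvsc structures):

	#include <linux/hyperv.h>

	struct example_packet {
		void (*completion_cb)(void *ctx);
		void *completion_ctx;
	};

	static int example_send(struct vmbus_channel *channel,
				void *msg, u32 msglen,
				struct example_packet *pkt)
	{
		/* The host echoes req_id in the send-completion; encode
		 * the packet pointer only when a callback exists, so a
		 * completion can never point at a packet that is not
		 * expecting one (0 == nothing to complete). */
		u64 req_id = pkt->completion_cb ? (unsigned long)pkt : 0;

		return vmbus_sendpacket(channel, msg, msglen, req_id,
					VM_PKT_DATA_INBAND,
					VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}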

@ -241,13 +241,11 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
if (status == 1) {
netif_carrier_on(net);
netif_wake_queue(net);
ndev_ctx = netdev_priv(net);
schedule_delayed_work(&ndev_ctx->dwork, 0);
schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
} else {
netif_carrier_off(net);
netif_tx_disable(net);
}
}


@ -61,9 +61,6 @@ struct rndis_request {
static void rndis_filter_send_completion(void *ctx);
static void rndis_filter_send_request_completion(void *ctx);
static struct rndis_device *get_rndis_device(void)
{
@ -241,10 +238,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->page_buf[0].len;
}
packet->completion.send.send_completion_ctx = req;/* packet; */
packet->completion.send.send_completion =
rndis_filter_send_request_completion;
packet->completion.send.send_completion_tid = (unsigned long)dev;
packet->completion.send.send_completion = NULL;
ret = netvsc_send(dev->net_dev->dev, packet);
return ret;
@ -999,9 +993,3 @@ static void rndis_filter_send_completion(void *ctx)
/* Pass it back to the original handler */
filter_pkt->completion(filter_pkt->completion_ctx);
}
static void rndis_filter_send_request_completion(void *ctx)
{
/* Noop */
}


@ -280,6 +280,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
if (r) {
ath_err(common,
"Unable to reset channel, reset status %d\n", r);
ath9k_hw_enable_interrupts(ah);
ath9k_queue_reset(sc, RESET_TYPE_BB_HANG);
goto out;
}


@ -3317,15 +3317,15 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
goto err;
}
/* External image takes precedence if specified */
if (brcmf_sdbrcm_download_code_file(bus)) {
brcmf_err("dongle image file download failed\n");
goto err;
}
/* External nvram takes precedence if specified */
if (brcmf_sdbrcm_download_nvram(bus))
if (brcmf_sdbrcm_download_nvram(bus)) {
brcmf_err("dongle nvram file download failed\n");
goto err;
}
/* Take arm out of reset */
if (brcmf_sdbrcm_download_state(bus, false)) {


@ -1891,8 +1891,10 @@ static s32
brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, const u8 *mac_addr, struct key_params *params)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_wsec_key key;
s32 err = 0;
u8 keybuf[8];
memset(&key, 0, sizeof(key));
key.index = (u32) key_idx;
@ -1916,8 +1918,9 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
memcpy(key.data, params->key, key.len);
if (params->cipher == WLAN_CIPHER_SUITE_TKIP) {
u8 keybuf[8];
if ((ifp->vif->mode != WL_MODE_AP) &&
(params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
memcpy(keybuf, &key.data[24], sizeof(keybuf));
memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
memcpy(&key.data[16], keybuf, sizeof(keybuf));
@ -2013,7 +2016,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
break;
case WLAN_CIPHER_SUITE_TKIP:
if (ifp->vif->mode != WL_MODE_AP) {
brcmf_dbg(CONN, "Swapping key\n");
brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
memcpy(keybuf, &key.data[24], sizeof(keybuf));
memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
memcpy(&key.data[16], keybuf, sizeof(keybuf));
@ -2118,8 +2121,7 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
err = -EAGAIN;
goto done;
}
switch (wsec & ~SES_OW_ENABLED) {
case WEP_ENABLED:
if (wsec & WEP_ENABLED) {
sec = &profile->sec;
if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
params.cipher = WLAN_CIPHER_SUITE_WEP40;
@ -2128,16 +2130,13 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
params.cipher = WLAN_CIPHER_SUITE_WEP104;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
}
break;
case TKIP_ENABLED:
} else if (wsec & TKIP_ENABLED) {
params.cipher = WLAN_CIPHER_SUITE_TKIP;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
break;
case AES_ENABLED:
} else if (wsec & AES_ENABLED) {
params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
break;
default:
} else {
brcmf_err("Invalid algo (0x%x)\n", wsec);
err = -EINVAL;
goto done;
@ -3824,8 +3823,9 @@ exit:
static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = -EPERM;
s32 err;
struct brcmf_fil_bss_enable_le bss_enable;
struct brcmf_join_params join_params;
brcmf_dbg(TRACE, "Enter\n");
@ -3833,16 +3833,21 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
/* Due to most likely deauths outstanding we sleep */
/* first to make sure they get processed by fw. */
msleep(400);
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
if (err < 0) {
brcmf_err("setting AP mode failed %d\n", err);
goto exit;
}
memset(&join_params, 0, sizeof(join_params));
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
&join_params, sizeof(join_params));
if (err < 0)
brcmf_err("SET SSID error (%d)\n", err);
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
if (err < 0) {
if (err < 0)
brcmf_err("BRCMF_C_UP error %d\n", err);
goto exit;
}
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
if (err < 0)
brcmf_err("setting AP mode failed %d\n", err);
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0);
if (err < 0)
brcmf_err("setting INFRA mode failed %d\n", err);
} else {
bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
bss_enable.enable = cpu_to_le32(0);
@ -3855,7 +3860,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
exit:
return err;
}


@ -1393,8 +1393,10 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
queue_work(adapter->workqueue, &adapter->main_work);
/* Perform internal scan synchronously */
if (!priv->scan_request)
if (!priv->scan_request) {
dev_dbg(adapter->dev, "wait internal scan\n");
mwifiex_wait_queue_complete(adapter, cmd_node);
}
} else {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);
@ -1793,7 +1795,12 @@ check_next_scan:
/* Need to indicate IOCTL complete */
if (adapter->curr_cmd->wait_q_enabled) {
adapter->cmd_wait_q.status = 0;
mwifiex_complete_cmd(adapter, adapter->curr_cmd);
if (!priv->scan_request) {
dev_dbg(adapter->dev,
"complete internal scan\n");
mwifiex_complete_cmd(adapter,
adapter->curr_cmd);
}
}
if (priv->report_scan_result)
priv->report_scan_result = false;


@ -20,6 +20,7 @@ if RT2X00
config RT2400PCI
tristate "Ralink rt2400 (PCI/PCMCIA) support"
depends on PCI
select RT2X00_LIB_MMIO
select RT2X00_LIB_PCI
select EEPROM_93CX6
---help---
@ -31,6 +32,7 @@ config RT2400PCI
config RT2500PCI
tristate "Ralink rt2500 (PCI/PCMCIA) support"
depends on PCI
select RT2X00_LIB_MMIO
select RT2X00_LIB_PCI
select EEPROM_93CX6
---help---
@ -43,6 +45,7 @@ config RT61PCI
tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support"
depends on PCI
select RT2X00_LIB_PCI
select RT2X00_LIB_MMIO
select RT2X00_LIB_FIRMWARE
select RT2X00_LIB_CRYPTO
select CRC_ITU_T
@ -57,6 +60,7 @@ config RT2800PCI
tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
depends on PCI || SOC_RT288X || SOC_RT305X
select RT2800_LIB
select RT2X00_LIB_MMIO
select RT2X00_LIB_PCI if PCI
select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
select RT2X00_LIB_FIRMWARE
@ -185,6 +189,9 @@ endif
config RT2800_LIB
tristate
config RT2X00_LIB_MMIO
tristate
config RT2X00_LIB_PCI
tristate
select RT2X00_LIB


@ -9,6 +9,7 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o
rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o
obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o
obj-$(CONFIG_RT2X00_LIB_MMIO) += rt2x00mmio.o
obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o
obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o


@ -34,6 +34,7 @@
#include <linux/slab.h>
#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt2400pci.h"


@ -34,6 +34,7 @@
#include <linux/slab.h>
#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt2500pci.h"


@ -41,6 +41,7 @@
#include <linux/eeprom_93cx6.h>
#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt2x00soc.h"
#include "rt2800lib.h"


@ -0,0 +1,216 @@
/*
Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
<http://rt2x00.serialmonkey.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the
Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
Module: rt2x00mmio
Abstract: rt2x00 generic mmio device routines.
*/
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "rt2x00.h"
#include "rt2x00mmio.h"
/*
* Register access.
*/
int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
const struct rt2x00_field32 field,
u32 *reg)
{
unsigned int i;
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return 0;
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
rt2x00pci_register_read(rt2x00dev, offset, reg);
if (!rt2x00_get_field32(*reg, field))
return 1;
udelay(REGISTER_BUSY_DELAY);
}
printk_once(KERN_ERR "%s() Indirect register access failed: "
"offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg);
*reg = ~0;
return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue = rt2x00dev->rx;
struct queue_entry *entry;
struct queue_entry_priv_pci *entry_priv;
struct skb_frame_desc *skbdesc;
int max_rx = 16;
while (--max_rx) {
entry = rt2x00queue_get_entry(queue, Q_INDEX);
entry_priv = entry->priv_data;
if (rt2x00dev->ops->lib->get_entry_state(entry))
break;
/*
* Fill in desc fields of the skb descriptor
*/
skbdesc = get_skb_frame_desc(entry->skb);
skbdesc->desc = entry_priv->desc;
skbdesc->desc_len = entry->queue->desc_size;
/*
* DMA is already done, notify rt2x00lib that
* it finished successfully.
*/
rt2x00lib_dmastart(entry);
rt2x00lib_dmadone(entry);
/*
* Send the frame to rt2x00lib for further processing.
*/
rt2x00lib_rxdone(entry, GFP_ATOMIC);
}
return !max_rx;
}
EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
{
unsigned int i;
for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
msleep(10);
}
EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);
/*
* Device initialization handlers.
*/
static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
struct data_queue *queue)
{
struct queue_entry_priv_pci *entry_priv;
void *addr;
dma_addr_t dma;
unsigned int i;
/*
* Allocate DMA memory for descriptor and buffer.
*/
addr = dma_alloc_coherent(rt2x00dev->dev,
queue->limit * queue->desc_size,
&dma, GFP_KERNEL);
if (!addr)
return -ENOMEM;
memset(addr, 0, queue->limit * queue->desc_size);
/*
* Initialize all queue entries to contain valid addresses.
*/
for (i = 0; i < queue->limit; i++) {
entry_priv = queue->entries[i].priv_data;
entry_priv->desc = addr + i * queue->desc_size;
entry_priv->desc_dma = dma + i * queue->desc_size;
}
return 0;
}
static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
struct data_queue *queue)
{
struct queue_entry_priv_pci *entry_priv =
queue->entries[0].priv_data;
if (entry_priv->desc)
dma_free_coherent(rt2x00dev->dev,
queue->limit * queue->desc_size,
entry_priv->desc, entry_priv->desc_dma);
entry_priv->desc = NULL;
}
int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
int status;
/*
* Allocate DMA
*/
queue_for_each(rt2x00dev, queue) {
status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
if (status)
goto exit;
}
/*
* Register interrupt handler.
*/
status = request_irq(rt2x00dev->irq,
rt2x00dev->ops->lib->irq_handler,
IRQF_SHARED, rt2x00dev->name, rt2x00dev);
if (status) {
ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
rt2x00dev->irq, status);
goto exit;
}
return 0;
exit:
queue_for_each(rt2x00dev, queue)
rt2x00pci_free_queue_dma(rt2x00dev, queue);
return status;
}
EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
/*
* Free irq line.
*/
free_irq(rt2x00dev->irq, rt2x00dev);
/*
* Free DMA
*/
queue_for_each(rt2x00dev, queue)
rt2x00pci_free_queue_dma(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
/*
* rt2x00mmio module information.
*/
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 mmio library");
MODULE_LICENSE("GPL");


@ -0,0 +1,119 @@
/*
Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
<http://rt2x00.serialmonkey.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the
Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
Module: rt2x00mmio
Abstract: Data structures for the rt2x00mmio module.
*/
#ifndef RT2X00MMIO_H
#define RT2X00MMIO_H
#include <linux/io.h>
/*
* Register access.
*/
static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
u32 *value)
{
*value = readl(rt2x00dev->csr.base + offset);
}
static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
void *value, const u32 length)
{
memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
}
static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
u32 value)
{
writel(value, rt2x00dev->csr.base + offset);
}
static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
const void *value,
const u32 length)
{
__iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
}
/**
* rt2x00pci_regbusy_read - Read from register with busy check
* @rt2x00dev: Device pointer, see &struct rt2x00_dev.
* @offset: Register offset
* @field: Field to check if register is busy
* @reg: Pointer to where register contents should be stored
*
* This function will read the given register, and checks if the
* register is busy. If it is, it will sleep for a couple of
* microseconds before reading the register again. If the register
* is not read after a certain timeout, this function will return
* FALSE.
*/
int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
const struct rt2x00_field32 field,
u32 *reg);
/**
* struct queue_entry_priv_pci: Per entry PCI specific information
*
* @desc: Pointer to device descriptor
* @desc_dma: DMA pointer to &desc.
* @data: Pointer to device's entry memory.
* @data_dma: DMA pointer to &data.
*/
struct queue_entry_priv_pci {
__le32 *desc;
dma_addr_t desc_dma;
};
/**
* rt2x00pci_rxdone - Handle RX done events
* @rt2x00dev: Device pointer, see &struct rt2x00_dev.
*
* Returns true if there are still rx frames pending and false if all
* pending rx frames were processed.
*/
bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);
/**
* rt2x00pci_flush_queue - Flush data queue
* @queue: Data queue to stop
* @drop: True to drop all pending frames.
*
* This will wait for a maximum of 100ms, waiting for the queues
* to become empty.
*/
void rt2x00pci_flush_queue(struct data_queue *queue, bool drop);
/*
* Device initialization handlers.
*/
int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev);
void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev);
#endif /* RT2X00MMIO_H */


@ -32,182 +32,6 @@
#include "rt2x00.h"
#include "rt2x00pci.h"
/*
* Register access.
*/
int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
const struct rt2x00_field32 field,
u32 *reg)
{
unsigned int i;
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return 0;
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
rt2x00pci_register_read(rt2x00dev, offset, reg);
if (!rt2x00_get_field32(*reg, field))
return 1;
udelay(REGISTER_BUSY_DELAY);
}
ERROR(rt2x00dev, "Indirect register access failed: "
"offset=0x%.08x, value=0x%.08x\n", offset, *reg);
*reg = ~0;
return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue = rt2x00dev->rx;
struct queue_entry *entry;
struct queue_entry_priv_pci *entry_priv;
struct skb_frame_desc *skbdesc;
int max_rx = 16;
while (--max_rx) {
entry = rt2x00queue_get_entry(queue, Q_INDEX);
entry_priv = entry->priv_data;
if (rt2x00dev->ops->lib->get_entry_state(entry))
break;
/*
* Fill in desc fields of the skb descriptor
*/
skbdesc = get_skb_frame_desc(entry->skb);
skbdesc->desc = entry_priv->desc;
skbdesc->desc_len = entry->queue->desc_size;
/*
* DMA is already done, notify rt2x00lib that
* it finished successfully.
*/
rt2x00lib_dmastart(entry);
rt2x00lib_dmadone(entry);
/*
* Send the frame to rt2x00lib for further processing.
*/
rt2x00lib_rxdone(entry, GFP_ATOMIC);
}
return !max_rx;
}
EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
{
unsigned int i;
for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
msleep(10);
}
EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);
/*
* Device initialization handlers.
*/
static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
struct data_queue *queue)
{
struct queue_entry_priv_pci *entry_priv;
void *addr;
dma_addr_t dma;
unsigned int i;
/*
* Allocate DMA memory for descriptor and buffer.
*/
addr = dma_alloc_coherent(rt2x00dev->dev,
queue->limit * queue->desc_size,
&dma, GFP_KERNEL);
if (!addr)
return -ENOMEM;
memset(addr, 0, queue->limit * queue->desc_size);
/*
* Initialize all queue entries to contain valid addresses.
*/
for (i = 0; i < queue->limit; i++) {
entry_priv = queue->entries[i].priv_data;
entry_priv->desc = addr + i * queue->desc_size;
entry_priv->desc_dma = dma + i * queue->desc_size;
}
return 0;
}
static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
struct data_queue *queue)
{
struct queue_entry_priv_pci *entry_priv =
queue->entries[0].priv_data;
if (entry_priv->desc)
dma_free_coherent(rt2x00dev->dev,
queue->limit * queue->desc_size,
entry_priv->desc, entry_priv->desc_dma);
entry_priv->desc = NULL;
}
int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
int status;
/*
* Allocate DMA
*/
queue_for_each(rt2x00dev, queue) {
status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
if (status)
goto exit;
}
/*
* Register interrupt handler.
*/
status = request_irq(rt2x00dev->irq,
rt2x00dev->ops->lib->irq_handler,
IRQF_SHARED, rt2x00dev->name, rt2x00dev);
if (status) {
ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
rt2x00dev->irq, status);
goto exit;
}
return 0;
exit:
queue_for_each(rt2x00dev, queue)
rt2x00pci_free_queue_dma(rt2x00dev, queue);
return status;
}
EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
/*
* Free irq line.
*/
free_irq(rt2x00dev->irq, rt2x00dev);
/*
* Free DMA
*/
queue_for_each(rt2x00dev, queue)
rt2x00pci_free_queue_dma(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
/*
* PCI driver handlers.
*/


@ -35,94 +35,6 @@
*/
#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops)
/*
* Register access.
*/
static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
u32 *value)
{
*value = readl(rt2x00dev->csr.base + offset);
}
static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
void *value, const u32 length)
{
memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
}
static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
u32 value)
{
writel(value, rt2x00dev->csr.base + offset);
}
static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
const void *value,
const u32 length)
{
__iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
}
/**
* rt2x00pci_regbusy_read - Read from register with busy check
* @rt2x00dev: Device pointer, see &struct rt2x00_dev.
* @offset: Register offset
* @field: Field to check if register is busy
* @reg: Pointer to where register contents should be stored
*
* This function will read the given register, and checks if the
* register is busy. If it is, it will sleep for a couple of
* microseconds before reading the register again. If the register
* is not read after a certain timeout, this function will return
* FALSE.
*/
int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
const struct rt2x00_field32 field,
u32 *reg);
/**
* struct queue_entry_priv_pci: Per entry PCI specific information
*
* @desc: Pointer to device descriptor
* @desc_dma: DMA pointer to &desc.
* @data: Pointer to device's entry memory.
* @data_dma: DMA pointer to &data.
*/
struct queue_entry_priv_pci {
__le32 *desc;
dma_addr_t desc_dma;
};
/**
* rt2x00pci_rxdone - Handle RX done events
* @rt2x00dev: Device pointer, see &struct rt2x00_dev.
*
* Returns true if there are still rx frames pending and false if all
* pending rx frames were processed.
*/
bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);
/**
* rt2x00pci_flush_queue - Flush data queue
* @queue: Data queue to stop
* @drop: True to drop all pending frames.
*
* This will wait for a maximum of 100ms, waiting for the queues
* to become empty.
*/
void rt2x00pci_flush_queue(struct data_queue *queue, bool drop);
/*
* Device initialization handlers.
*/
int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev);
void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev);
/*
* PCI driver handlers.
*/


@ -35,6 +35,7 @@
#include <linux/eeprom_93cx6.h>
#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt61pci.h"


@ -769,6 +769,7 @@ struct qeth_card {
unsigned long thread_start_mask;
unsigned long thread_allowed_mask;
unsigned long thread_running_mask;
struct task_struct *recovery_task;
spinlock_t ip_lock;
struct list_head ip_list;
struct list_head *ip_tbd_list;
@ -862,6 +863,8 @@ extern struct qeth_card_list_struct qeth_core_card_list;
extern struct kmem_cache *qeth_core_header_cache;
extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
void qeth_set_recovery_task(struct qeth_card *);
void qeth_clear_recovery_task(struct qeth_card *);
void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
int qeth_threads_running(struct qeth_card *, unsigned long);
int qeth_wait_for_threads(struct qeth_card *, unsigned long);


@ -177,6 +177,23 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
return "n/a";
}
void qeth_set_recovery_task(struct qeth_card *card)
{
card->recovery_task = current;
}
EXPORT_SYMBOL_GPL(qeth_set_recovery_task);
void qeth_clear_recovery_task(struct qeth_card *card)
{
card->recovery_task = NULL;
}
EXPORT_SYMBOL_GPL(qeth_clear_recovery_task);
static bool qeth_is_recovery_task(const struct qeth_card *card)
{
return card->recovery_task == current;
}
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask)
{
@ -205,6 +222,8 @@ EXPORT_SYMBOL_GPL(qeth_threads_running);
int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
{
if (qeth_is_recovery_task(card))
return 0;
return wait_event_interruptible(card->wait_q,
qeth_threads_running(card, threads) == 0);
}

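The qeth fix above breaks a self-deadlock: the recovery thread itself
ends up in qeth_wait_for_threads(), waiting for the recovery thread to
finish.  Recording which task is recovering and short-circuiting the
wait for it is a reusable idiom; a minimal sketch with hypothetical
names:

	#include <linux/sched.h>
	#include <linux/wait.h>

	struct example_card {
		struct task_struct *recovery_task;	/* NULL when idle */
		wait_queue_head_t wait_q;
	};

	/* implemented elsewhere: which threads are still running */
	extern int example_threads_running(struct example_card *card,
					   unsigned long threads);

	static int example_wait_for_threads(struct example_card *card,
					    unsigned long threads)
	{
		/* The recovery task would be waiting for itself. */
		if (card->recovery_task == current)
			return 0;

		return wait_event_interruptible(card->wait_q,
				example_threads_running(card, threads) == 0);
	}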

@ -1143,6 +1143,7 @@ static int qeth_l2_recover(void *ptr)
QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
qeth_set_recovery_task(card);
__qeth_l2_set_offline(card->gdev, 1);
rc = __qeth_l2_set_online(card->gdev, 1);
if (!rc)
@ -1153,6 +1154,7 @@ static int qeth_l2_recover(void *ptr)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
}
qeth_clear_recovery_task(card);
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
return 0;


@ -3515,6 +3515,7 @@ static int qeth_l3_recover(void *ptr)
QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
qeth_set_recovery_task(card);
__qeth_l3_set_offline(card->gdev, 1);
rc = __qeth_l3_set_online(card->gdev, 1);
if (!rc)
@ -3525,6 +3526,7 @@ static int qeth_l3_recover(void *ptr)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
}
qeth_clear_recovery_task(card);
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
return 0;


@ -1012,6 +1012,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* This hook can be used by the module to update any security state
* associated with the TUN device's security structure.
* @security pointer to the TUN devices's security structure.
* @skb_owned_by:
* This hook sets the packet's owning sock.
* @skb is the packet.
* @sk the sock which owns the packet.
*
* Security hooks for XFRM operations.
*
@ -1638,6 +1642,7 @@ struct security_operations {
int (*tun_dev_attach_queue) (void *security);
int (*tun_dev_attach) (struct sock *sk, void *security);
int (*tun_dev_open) (void *security);
void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk);
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@ -2588,6 +2593,8 @@ int security_tun_dev_attach_queue(void *security);
int security_tun_dev_attach(struct sock *sk, void *security);
int security_tun_dev_open(void *security);
void security_skb_owned_by(struct sk_buff *skb, struct sock *sk);
#else /* CONFIG_SECURITY_NETWORK */
static inline int security_unix_stream_connect(struct sock *sock,
struct sock *other,
@ -2779,6 +2786,11 @@ static inline int security_tun_dev_open(void *security)
{
return 0;
}
static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
{
}
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM

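The skb_owned_by hook added above, and wired up in the capability,
security-core, and SELinux hunks near the end of this series, follows
the standard three-part recipe for a new LSM hook.  Condensed for
orientation (these fragments mirror the actual diffs, trimmed to the
essentials):

	/* 1) a slot in struct security_operations (include/linux/security.h) */
	void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk);

	/* 2) the dispatcher that callers use (security/security.c) */
	void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
	{
		security_ops->skb_owned_by(skb, sk);
	}

	/* 3) a no-op default (security/capability.c), so LSMs that do
	 *    not care about the hook need no changes at all */
	static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk)
	{
	}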

@ -130,6 +130,14 @@ struct iucv_sock {
enum iucv_tx_notify n);
};
struct iucv_skb_cb {
u32 class; /* target class of message */
u32 tag; /* tag associated with message */
u32 offset; /* offset for skb receival */
};
#define IUCV_SKB_CB(__skb) ((struct iucv_skb_cb *)&((__skb)->cb[0]))
/* iucv socket options (SOL_IUCV) */
#define SO_IPRMDATA_MSG 0x0080 /* send/recv IPRM_DATA msgs */
#define SO_MSGLIMIT 0x1000 /* get/set IUCV MSGLIMIT */


@ -531,6 +531,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
struct sk_buff *skb;
int copied, error = -EINVAL;
msg->msg_namelen = 0;
if (sock->state != SS_CONNECTED)
return -ENOTCONN;


@ -1642,6 +1642,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
ax25_address src;
const unsigned char *mac = skb_mac_header(skb);
memset(sax, 0, sizeof(struct full_sockaddr_ax25));
ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
&digi, NULL, NULL);
sax->sax25_family = AF_AX25;


@ -230,6 +230,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
msg->msg_namelen = 0;
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
@ -237,8 +239,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
return err;
}
msg->msg_namelen = 0;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;


@ -608,6 +608,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
rfcomm_dlc_accept(d);
msg->msg_namelen = 0;
return 0;
}


@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
hci_conn_accept(pi->conn->hcon, 0);
sk->sk_state = BT_CONFIG;
msg->msg_namelen = 0;
release_sock(sk);
return 0;


@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
if (m->msg_flags&MSG_OOB)
goto read_error;
m->msg_namelen = 0;
skb = skb_recv_datagram(sk, flags, 0 , &ret);
if (!skb)
goto read_error;


@ -466,7 +466,7 @@ static int cgw_notifier(struct notifier_block *nb,
if (gwj->src.dev == dev || gwj->dst.dev == dev) {
hlist_del(&gwj->list);
cgw_unregister_filter(gwj);
kfree(gwj);
kmem_cache_free(cgw_cache, gwj);
}
}
}
@ -864,7 +864,7 @@ static void cgw_remove_all_jobs(void)
hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
hlist_del(&gwj->list);
cgw_unregister_filter(gwj);
kfree(gwj);
kmem_cache_free(cgw_cache, gwj);
}
}
@ -920,7 +920,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
hlist_del(&gwj->list);
cgw_unregister_filter(gwj);
kfree(gwj);
kmem_cache_free(cgw_cache, gwj);
err = 0;
break;
}

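The can/gw fix above restores the allocator pairing rule: objects
obtained from a kmem_cache must be returned with kmem_cache_free(),
since kfree() assumes the object came from kmalloc() and can corrupt
the dedicated cache's accounting.  A minimal sketch of the pairing
(example_job and friends are hypothetical):

	#include <linux/slab.h>

	struct example_job {
		int id;
	};

	static struct kmem_cache *example_cache;

	static int __init example_init(void)
	{
		example_cache = kmem_cache_create("example_job",
						  sizeof(struct example_job),
						  0, 0, NULL);
		return example_cache ? 0 : -ENOMEM;
	}

	static void example_use(void)
	{
		struct example_job *job;

		job = kmem_cache_alloc(example_cache, GFP_KERNEL);
		if (!job)
			return;
		/* ... use job ... */
		kmem_cache_free(example_cache, job);	/* not kfree(job) */
	}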

@ -1072,7 +1072,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
cb->seq = net->dev_base_seq;
if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
ifla_policy) >= 0) {
if (tb[IFLA_EXT_MASK])
@ -1922,7 +1922,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
u32 ext_filter_mask = 0;
u16 min_ifinfo_dump_size = 0;
if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
ifla_policy) >= 0) {
if (tb[IFLA_EXT_MASK])
ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);

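Why the header length argument matters: nlmsg_parse() starts scanning
attributes at nlmsg_data(nlh) plus the aligned header length, and
RTM_GETLINK messages carry a 16-byte struct ifinfomsg, not the 1-byte
struct rtgenmsg.  Parsing with the smaller header makes the attribute
walker read from inside the fixed header.  A hedged sketch of the
corrected call, with the policy table passed in by the caller:

	#include <net/netlink.h>
	#include <linux/rtnetlink.h>

	static int example_parse_link_attrs(const struct nlmsghdr *nlh,
					    struct nlattr **tb,
					    const struct nla_policy *policy)
	{
		/* hdrlen must match the header this message really
		 * carries: struct ifinfomsg for RTM_GETLINK. */
		return nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb,
				   IFLA_MAX, policy);
	}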

@ -587,13 +587,16 @@ static void check_lifetime(struct work_struct *work)
{
unsigned long now, next, next_sec, next_sched;
struct in_ifaddr *ifa;
struct hlist_node *n;
int i;
now = jiffies;
next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
rcu_read_lock();
for (i = 0; i < IN4_ADDR_HSIZE; i++) {
bool change_needed = false;
rcu_read_lock();
hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
unsigned long age;
@ -606,16 +609,7 @@ static void check_lifetime(struct work_struct *work)
if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
age >= ifa->ifa_valid_lft) {
struct in_ifaddr **ifap ;
rtnl_lock();
for (ifap = &ifa->ifa_dev->ifa_list;
*ifap != NULL; ifap = &ifa->ifa_next) {
if (*ifap == ifa)
inet_del_ifa(ifa->ifa_dev,
ifap, 1);
}
rtnl_unlock();
change_needed = true;
} else if (ifa->ifa_preferred_lft ==
INFINITY_LIFE_TIME) {
continue;
@ -625,10 +619,8 @@ static void check_lifetime(struct work_struct *work)
next = ifa->ifa_tstamp +
ifa->ifa_valid_lft * HZ;
if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) {
ifa->ifa_flags |= IFA_F_DEPRECATED;
rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
}
if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
change_needed = true;
} else if (time_before(ifa->ifa_tstamp +
ifa->ifa_preferred_lft * HZ,
next)) {
@ -636,8 +628,42 @@ static void check_lifetime(struct work_struct *work)
ifa->ifa_preferred_lft * HZ;
}
}
rcu_read_unlock();
if (!change_needed)
continue;
rtnl_lock();
hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
unsigned long age;
if (ifa->ifa_flags & IFA_F_PERMANENT)
continue;
/* We try to batch several events at once. */
age = (now - ifa->ifa_tstamp +
ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
age >= ifa->ifa_valid_lft) {
struct in_ifaddr **ifap;
for (ifap = &ifa->ifa_dev->ifa_list;
*ifap != NULL; ifap = &(*ifap)->ifa_next) {
if (*ifap == ifa) {
inet_del_ifa(ifa->ifa_dev,
ifap, 1);
break;
}
}
} else if (ifa->ifa_preferred_lft !=
INFINITY_LIFE_TIME &&
age >= ifa->ifa_preferred_lft &&
!(ifa->ifa_flags & IFA_F_DEPRECATED)) {
ifa->ifa_flags |= IFA_F_DEPRECATED;
rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
}
}
rtnl_unlock();
}
rcu_read_unlock();
next_sec = round_jiffies_up(next);
next_sched = next;
@ -804,6 +830,8 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
return -EEXIST;
ifa = ifa_existing;
set_ifa_lifetime(ifa, valid_lft, prefered_lft);
cancel_delayed_work(&check_lifetime_work);
schedule_delayed_work(&check_lifetime_work, 0);
rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
}

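The check_lifetime() rework above is a two-phase scan: a cheap pass
under rcu_read_lock() only decides whether anything needs changing, and
the mutating pass reruns under rtnl_lock(), where sleeping (and
inet_del_ifa()) is legal; every condition is re-checked because the
state may have moved between the phases.  Schematically, with
hypothetical example_* names:

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/rtnetlink.h>

	struct example_entry {
		struct list_head node;
	};

	static LIST_HEAD(example_list);

	extern bool example_is_expired(struct example_entry *e);
	extern void example_remove(struct example_entry *e);	/* may sleep */

	static void example_scan(void)
	{
		struct example_entry *e, *tmp;
		bool change_needed = false;

		/* Phase 1: lockless read side; no sleeping, no writes. */
		rcu_read_lock();
		list_for_each_entry_rcu(e, &example_list, node)
			if (example_is_expired(e))
				change_needed = true;
		rcu_read_unlock();

		if (!change_needed)
			return;

		/* Phase 2: redo the checks and mutate under rtnl_lock(),
		 * which may sleep; the list may have changed meanwhile. */
		rtnl_lock();
		list_for_each_entry_safe(e, tmp, &example_list, node)
			if (example_is_expired(e))
				example_remove(e);
		rtnl_unlock();
	}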

@ -2709,6 +2709,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
skb_reserve(skb, MAX_TCP_HEADER);
skb_dst_set(skb, dst);
security_skb_owned_by(skb, sk);
mss = dst_metric_advmss(dst);
if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)


@ -386,6 +386,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (dst)
dst->ops->redirect(dst, sk, skb);
goto out;
}
if (type == ICMPV6_PKT_TOOBIG) {


@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
IRDA_DEBUG(4, "%s()\n", __func__);
msg->msg_namelen = 0;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (!skb)


@ -49,12 +49,6 @@ static const u8 iprm_shutdown[8] =
#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class))
/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb) ((skb)->cb) /* iucv message tag */
#define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN (TRGCLS_SIZE)
#define __iucv_sock_wait(sk, condition, timeo, ret) \
do { \
DEFINE_WAIT(__wait); \
@ -1141,7 +1135,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
/* increment and save iucv message tag for msg_completion cbk */
txmsg.tag = iucv->send_tag++;
memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
IUCV_SKB_CB(skb)->tag = txmsg.tag;
if (iucv->transport == AF_IUCV_TRANS_HIPER) {
atomic_inc(&iucv->msg_sent);
@ -1224,7 +1218,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
return -ENOMEM;
/* copy target class to control buffer of new skb */
memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);
IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
/* copy data fragment */
memcpy(nskb->data, skb->data + copied, size);
@ -1256,7 +1250,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
/* store msg target class in the second 4 bytes of skb ctrl buffer */
/* Note: the first 4 bytes are reserved for msg tag */
memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
IUCV_SKB_CB(skb)->class = msg->class;
/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
@ -1292,6 +1286,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
}
}
IUCV_SKB_CB(skb)->offset = 0;
if (sock_queue_rcv_skb(sk, skb))
skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
@ -1327,6 +1322,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
unsigned int copied, rlen;
struct sk_buff *skb, *rskb, *cskb;
int err = 0;
u32 offset;
msg->msg_namelen = 0;
if ((sk->sk_state == IUCV_DISCONN) &&
skb_queue_empty(&iucv->backlog_skb_q) &&
@ -1346,13 +1344,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
return err;
}
rlen = skb->len; /* real length of skb */
offset = IUCV_SKB_CB(skb)->offset;
rlen = skb->len - offset; /* real length of skb */
copied = min_t(unsigned int, rlen, len);
if (!rlen)
sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
cskb = skb;
if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {
if (!(flags & MSG_PEEK))
skb_queue_head(&sk->sk_receive_queue, skb);
return -EFAULT;
@ -1370,7 +1369,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
* get the trgcls from the control buffer of the skb due to
* fragmentation of original iucv message. */
err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
CB_TRGCLS_LEN, CB_TRGCLS(skb));
sizeof(IUCV_SKB_CB(skb)->class),
(void *)&IUCV_SKB_CB(skb)->class);
if (err) {
if (!(flags & MSG_PEEK))
skb_queue_head(&sk->sk_receive_queue, skb);
@ -1382,9 +1382,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
/* SOCK_STREAM: re-queue skb if it contains unreceived data */
if (sk->sk_type == SOCK_STREAM) {
skb_pull(skb, copied);
if (skb->len) {
skb_queue_head(&sk->sk_receive_queue, skb);
if (copied < rlen) {
IUCV_SKB_CB(skb)->offset = offset + copied;
goto done;
}
}
@ -1403,6 +1402,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
spin_lock_bh(&iucv->message_q.lock);
rskb = skb_dequeue(&iucv->backlog_skb_q);
while (rskb) {
IUCV_SKB_CB(rskb)->offset = 0;
if (sock_queue_rcv_skb(sk, rskb)) {
skb_queue_head(&iucv->backlog_skb_q,
rskb);
@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
spin_lock_irqsave(&list->lock, flags);
while (list_skb != (struct sk_buff *)list) {
if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
this = list_skb;
break;
}
@ -2091,6 +2091,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
IUCV_SKB_CB(skb)->offset = 0;
spin_lock(&iucv->message_q.lock);
if (skb_queue_empty(&iucv->backlog_skb_q)) {
if (sock_queue_rcv_skb(sk, skb)) {
@ -2195,8 +2196,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
/* fall through and receive zero length data */
case 0:
/* plain data frame */
memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
CB_TRGCLS_LEN);
IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
err = afiucv_hs_callback_rx(sk, skb);
break;
default:

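The AF_IUCV hunks above replace skb_pull() on a queued skb (which
mutates data shared with the requeue and MSG_PEEK paths) with a
consumed-bytes offset kept in the skb's 48-byte control buffer.  The
cb idiom, as a hedged sketch with hypothetical EXAMPLE_* names:

	#include <linux/skbuff.h>
	#include <net/sock.h>

	struct example_skb_cb {
		u32 offset;	/* bytes already copied to userspace */
	};
	#define EXAMPLE_SKB_CB(skb) ((struct example_skb_cb *)&((skb)->cb[0]))

	static int example_recv_chunk(struct sk_buff *skb,
				      struct msghdr *msg, size_t len)
	{
		u32 offset = EXAMPLE_SKB_CB(skb)->offset;
		int copied = min_t(unsigned int, skb->len - offset, len);

		if (skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied))
			return -EFAULT;

		/* Record progress instead of skb_pull(): the skb data
		 * stays intact, so requeueing and MSG_PEEK remain safe. */
		EXAMPLE_SKB_CB(skb)->offset = offset + copied;
		return copied;
	}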

@ -690,6 +690,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
lsa->l2tp_flowinfo = 0;
lsa->l2tp_scope_id = 0;
lsa->l2tp_conn_id = 0;
if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
lsa->l2tp_scope_id = IP6CB(skb)->iif;
}


@ -720,6 +720,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
int target; /* Read at least this many bytes */
long timeo;
msg->msg_namelen = 0;
lock_sock(sk);
copied = -ENOTCONN;
if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))


@ -1173,6 +1173,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
}
if (sax != NULL) {
memset(sax, 0, sizeof(*sax));
sax->sax25_family = AF_NETROM;
skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
AX25_ADDR_LEN);


@ -646,6 +646,8 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
pr_debug("%p %zu\n", sk, len);
msg->msg_namelen = 0;
lock_sock(sk);
if (sk->sk_state == LLCP_CLOSED &&
@ -691,6 +693,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap);
memset(sockaddr, 0, sizeof(*sockaddr));
sockaddr->sa_family = AF_NFC;
sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP;
sockaddr->dsap = ui_cb->dsap;


@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (srose != NULL) {
memset(srose, 0, msg->msg_namelen);
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->dest_addr;
srose->srose_call = rose->dest_call;


@ -790,6 +790,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
if (addr) {
addr->family = AF_TIPC;
addr->addrtype = TIPC_ADDR_ID;
memset(&addr->addr, 0, sizeof(addr->addr));
addr->addr.id.ref = msg_origport(msg);
addr->addr.id.node = msg_orignode(msg);
addr->addr.name.domain = 0; /* could leave uninitialized */
@ -904,6 +905,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
goto exit;
}
/* will be updated in set_orig_addr() if needed */
m->msg_namelen = 0;
timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:
@ -1013,6 +1017,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
goto exit;
}
/* will be updated in set_orig_addr() if needed */
m->msg_namelen = 0;
target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);


@ -1670,6 +1670,8 @@ vsock_stream_recvmsg(struct kiocb *kiocb,
vsk = vsock_sk(sk);
err = 0;
msg->msg_namelen = 0;
lock_sock(sk);
if (sk->sk_state != SS_CONNECTED) {


@ -1736,6 +1736,8 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
return -EOPNOTSUPP;
msg->msg_namelen = 0;
/* Retrieve the head sk_buff from the socket's receive queue. */
err = 0;
skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
@ -1768,7 +1770,6 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
if (err)
goto out;
msg->msg_namelen = 0;
if (msg->msg_name) {
struct sockaddr_vm *vm_addr;


@ -224,6 +224,7 @@ void cfg80211_conn_work(struct work_struct *work)
rtnl_lock();
cfg80211_lock_rdev(rdev);
mutex_lock(&rdev->devlist_mtx);
mutex_lock(&rdev->sched_scan_mtx);
list_for_each_entry(wdev, &rdev->wdev_list, list) {
wdev_lock(wdev);
@ -248,6 +249,7 @@ void cfg80211_conn_work(struct work_struct *work)
wdev_unlock(wdev);
}
mutex_unlock(&rdev->sched_scan_mtx);
mutex_unlock(&rdev->devlist_mtx);
cfg80211_unlock_rdev(rdev);
rtnl_unlock();


@ -737,6 +737,11 @@ static int cap_tun_dev_open(void *security)
{
return 0;
}
static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk)
{
}
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@ -1071,6 +1076,7 @@ void __init security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, tun_dev_open);
set_to_cap_if_null(ops, tun_dev_attach_queue);
set_to_cap_if_null(ops, tun_dev_attach);
set_to_cap_if_null(ops, skb_owned_by);
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
set_to_cap_if_null(ops, xfrm_policy_alloc_security);


@ -1290,6 +1290,11 @@ int security_tun_dev_open(void *security)
}
EXPORT_SYMBOL(security_tun_dev_open);
void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
{
security_ops->skb_owned_by(skb, sk);
}
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM


@ -51,6 +51,7 @@
#include <linux/tty.h>
#include <net/icmp.h>
#include <net/ip.h> /* for local_port_range[] */
#include <net/sock.h>
#include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
#include <net/net_namespace.h>
#include <net/netlabel.h>
@ -4363,6 +4364,11 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
}
static void selinux_skb_owned_by(struct sk_buff *skb, struct sock *sk)
{
skb_set_owner_w(skb, sk);
}
static int selinux_secmark_relabel_packet(u32 sid)
{
const struct task_security_struct *__tsec;
@ -5664,6 +5670,7 @@ static struct security_operations selinux_ops = {
.tun_dev_attach_queue = selinux_tun_dev_attach_queue,
.tun_dev_attach = selinux_tun_dev_attach,
.tun_dev_open = selinux_tun_dev_open,
.skb_owned_by = selinux_skb_owned_by,
#ifdef CONFIG_SECURITY_NETWORK_XFRM
.xfrm_policy_alloc_security = selinux_xfrm_policy_alloc,