diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt index ca0911a20e8b..6e356d15154a 100644 --- a/Documentation/devicetree/bindings/net/davinci_emac.txt +++ b/Documentation/devicetree/bindings/net/davinci_emac.txt @@ -10,8 +10,6 @@ Required properties: - ti,davinci-ctrl-mod-reg-offset: offset to control module register - ti,davinci-ctrl-ram-offset: offset to control module ram - ti,davinci-ctrl-ram-size: size of control module ram -- ti,davinci-rmii-en: use RMII -- ti,davinci-no-bd-ram: has the emac controller BD RAM - interrupts: interrupt mapping for the davinci emac interrupts sources: 4 sources: > PAGE_SHIFT; - pfn = virt_to_phys(kbuffer) >> PAGE_SHIFT; /* do we need a gpadl body msg */ pfnsize = MAX_SIZE_CHANNEL_MESSAGE - @@ -248,7 +246,8 @@ static int create_gpadl_header(void *kbuffer, u32 size, gpadl_header->range[0].byte_offset = 0; gpadl_header->range[0].byte_count = size; for (i = 0; i < pfncount; i++) - gpadl_header->range[0].pfn_array[i] = pfn+i; + gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys( + kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT; *msginfo = msgheader; *messagecount = 1; @@ -301,7 +300,9 @@ static int create_gpadl_header(void *kbuffer, u32 size, * so the hypervisor gurantees that this is ok. */ for (i = 0; i < pfncurr; i++) - gpadl_body->pfn[i] = pfn + pfnsum + i; + gpadl_body->pfn[i] = slow_virt_to_phys( + kbuffer + PAGE_SIZE * (pfnsum + i)) >> + PAGE_SHIFT; /* add to msg header */ list_add_tail(&msgbody->msglistentry, @@ -327,7 +328,8 @@ static int create_gpadl_header(void *kbuffer, u32 size, gpadl_header->range[0].byte_offset = 0; gpadl_header->range[0].byte_count = size; for (i = 0; i < pagecount; i++) - gpadl_header->range[0].pfn_array[i] = pfn+i; + gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys( + kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT; *msginfo = msgheader; *messagecount = 1; @@ -344,7 +346,7 @@ nomem: * vmbus_establish_gpadl - Estabish a GPADL for the specified buffer * * @channel: a channel - * @kbuffer: from kmalloc + * @kbuffer: from kmalloc or vmalloc * @size: page-size multiple * @gpadl_handle: some funky thing */ diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index a7db819bca92..4c08018d7333 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2346,7 +2346,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) arp_work.work); struct slave *slave, *oldcurrent; struct list_head *iter; - int do_failover = 0; + int do_failover = 0, slave_state_changed = 0; if (!bond_has_slaves(bond)) goto re_arm; @@ -2370,7 +2370,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) bond_time_in_interval(bond, slave->dev->last_rx, 1)) { slave->link = BOND_LINK_UP; - bond_set_active_slave(slave); + slave_state_changed = 1; /* primary_slave has no meaning in round-robin * mode. the window of a slave being up and @@ -2399,7 +2399,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) !bond_time_in_interval(bond, slave->dev->last_rx, 2)) { slave->link = BOND_LINK_DOWN; - bond_set_backup_slave(slave); + slave_state_changed = 1; if (slave->link_failure_count < UINT_MAX) slave->link_failure_count++; @@ -2426,19 +2426,24 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) rcu_read_unlock(); - if (do_failover) { - /* the bond_select_active_slave must hold RTNL - * and curr_slave_lock for write. 
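For context on the create_gpadl_header() change above: the receive buffer may now come from vmalloc() rather than kmalloc(), so consecutive virtual pages are no longer guaranteed to be physically contiguous and a single base PFN plus an index is not enough. The sketch below, using a hypothetical fill_pfn_array() helper that is not part of this patch, shows the per-page translation the patch switches to; slow_virt_to_phys() walks the page tables and therefore works for both kmalloc and vmalloc addresses on x86.

#include <linux/mm.h>
#include <asm/pgtable.h>	/* slow_virt_to_phys() on x86 */

/*
 * Hypothetical helper: translate each page of a kernel virtual buffer
 * to its PFN individually instead of assuming physical contiguity.
 */
static void fill_pfn_array(void *kbuffer, u32 size, u64 *pfn_array)
{
	int pagecount = size >> PAGE_SHIFT;
	int i;

	for (i = 0; i < pagecount; i++)
		/* unlike virt_to_phys(), this also handles vmalloc()ed pages */
		pfn_array[i] = slow_virt_to_phys(kbuffer + PAGE_SIZE * i) >>
			       PAGE_SHIFT;
}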
- */ + if (do_failover || slave_state_changed) { if (!rtnl_trylock()) goto re_arm; - block_netpoll_tx(); - write_lock_bh(&bond->curr_slave_lock); - bond_select_active_slave(bond); + if (slave_state_changed) { + bond_slave_state_change(bond); + } else if (do_failover) { + /* the bond_select_active_slave must hold RTNL + * and curr_slave_lock for write. + */ + block_netpoll_tx(); + write_lock_bh(&bond->curr_slave_lock); - write_unlock_bh(&bond->curr_slave_lock); - unblock_netpoll_tx(); + bond_select_active_slave(bond); + + write_unlock_bh(&bond->curr_slave_lock); + unblock_netpoll_tx(); + } rtnl_unlock(); } @@ -2599,45 +2604,51 @@ do_failover: /* * Send ARP probes for active-backup mode ARP monitor. - * - * Called with rcu_read_lock hold. */ -static void bond_ab_arp_probe(struct bonding *bond) +static bool bond_ab_arp_probe(struct bonding *bond) { struct slave *slave, *before = NULL, *new_slave = NULL, - *curr_arp_slave = rcu_dereference(bond->current_arp_slave); + *curr_arp_slave, *curr_active_slave; struct list_head *iter; bool found = false; - read_lock(&bond->curr_slave_lock); + rcu_read_lock(); + curr_arp_slave = rcu_dereference(bond->current_arp_slave); + curr_active_slave = rcu_dereference(bond->curr_active_slave); - if (curr_arp_slave && bond->curr_active_slave) + if (curr_arp_slave && curr_active_slave) pr_info("PROBE: c_arp %s && cas %s BAD\n", curr_arp_slave->dev->name, - bond->curr_active_slave->dev->name); + curr_active_slave->dev->name); - if (bond->curr_active_slave) { - bond_arp_send_all(bond, bond->curr_active_slave); - read_unlock(&bond->curr_slave_lock); - return; + if (curr_active_slave) { + bond_arp_send_all(bond, curr_active_slave); + rcu_read_unlock(); + return true; } - - read_unlock(&bond->curr_slave_lock); + rcu_read_unlock(); /* if we don't have a curr_active_slave, search for the next available * backup slave from the current_arp_slave and make it the candidate * for becoming the curr_active_slave */ + if (!rtnl_trylock()) + return false; + /* curr_arp_slave might have gone away */ + curr_arp_slave = ACCESS_ONCE(bond->current_arp_slave); + if (!curr_arp_slave) { - curr_arp_slave = bond_first_slave_rcu(bond); - if (!curr_arp_slave) - return; + curr_arp_slave = bond_first_slave(bond); + if (!curr_arp_slave) { + rtnl_unlock(); + return true; + } } bond_set_slave_inactive_flags(curr_arp_slave); - bond_for_each_slave_rcu(bond, slave, iter) { + bond_for_each_slave(bond, slave, iter) { if (!found && !before && IS_UP(slave->dev)) before = slave; @@ -2667,21 +2678,26 @@ static void bond_ab_arp_probe(struct bonding *bond) if (!new_slave && before) new_slave = before; - if (!new_slave) - return; + if (!new_slave) { + rtnl_unlock(); + return true; + } new_slave->link = BOND_LINK_BACK; bond_set_slave_active_flags(new_slave); bond_arp_send_all(bond, new_slave); new_slave->jiffies = jiffies; rcu_assign_pointer(bond->current_arp_slave, new_slave); + rtnl_unlock(); + + return true; } static void bond_activebackup_arp_mon(struct work_struct *work) { struct bonding *bond = container_of(work, struct bonding, arp_work.work); - bool should_notify_peers = false; + bool should_notify_peers = false, should_commit = false; int delta_in_ticks; delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); @@ -2690,12 +2706,11 @@ static void bond_activebackup_arp_mon(struct work_struct *work) goto re_arm; rcu_read_lock(); - should_notify_peers = bond_should_notify_peers(bond); + should_commit = bond_ab_arp_inspect(bond); + rcu_read_unlock(); - if (bond_ab_arp_inspect(bond)) { - 
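The ARP monitor rework above follows a pattern worth spelling out: link state is inspected under rcu_read_lock(), while committing slave changes needs RTNL. Taking rtnl_lock() outright could deadlock, because bond_close() flushes this very workqueue while holding RTNL, so the monitor uses rtnl_trylock() and simply re-arms with a short delay when it loses the race. A minimal sketch of that shape, with simplified names and stubbed-out inspect/commit steps (this is not the bonding code itself):

#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct ex_monitor {
	struct delayed_work	arp_work;
	unsigned long		interval;	/* re-arm period, in jiffies */
};

static bool ex_inspect(struct ex_monitor *m) { return true; }	/* stub */
static void ex_commit(struct ex_monitor *m) { }			/* stub */

static void ex_arp_monitor(struct work_struct *work)
{
	struct ex_monitor *m = container_of(work, struct ex_monitor,
					    arp_work.work);
	unsigned long delay = m->interval;
	bool commit;

	/* Step 1: read-only inspection under RCU, nothing may sleep here. */
	rcu_read_lock();
	commit = ex_inspect(m);
	rcu_read_unlock();

	/* Step 2: acting on the result needs RTNL; never block on it here,
	 * since the close path flushes this work while holding RTNL.
	 */
	if (commit) {
		if (!rtnl_trylock()) {
			delay = 1;	/* lost the race, retry very soon */
			goto re_arm;
		}
		ex_commit(m);
		rtnl_unlock();
	}

re_arm:
	queue_delayed_work(system_wq, &m->arp_work, delay);
}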
rcu_read_unlock(); - + if (should_commit) { /* Race avoidance with bond_close flush of workqueue */ if (!rtnl_trylock()) { delta_in_ticks = 1; @@ -2704,13 +2719,14 @@ static void bond_activebackup_arp_mon(struct work_struct *work) } bond_ab_arp_commit(bond); - rtnl_unlock(); - rcu_read_lock(); } - bond_ab_arp_probe(bond); - rcu_read_unlock(); + if (!bond_ab_arp_probe(bond)) { + /* rtnl locking failed, re-arm */ + delta_in_ticks = 1; + should_notify_peers = false; + } re_arm: if (bond->params.arp_interval) diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 1a9062f4e0d6..86ccfb9f71cc 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -303,6 +303,19 @@ static inline void bond_set_backup_slave(struct slave *slave) } } +static inline void bond_slave_state_change(struct bonding *bond) +{ + struct list_head *iter; + struct slave *tmp; + + bond_for_each_slave(bond, tmp, iter) { + if (tmp->link == BOND_LINK_UP) + bond_set_active_slave(tmp); + else if (tmp->link == BOND_LINK_DOWN) + bond_set_backup_slave(tmp); + } +} + static inline int bond_slave_state(struct slave *slave) { return slave->backup; diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c index 811fa5d5c697..30104b60da85 100644 --- a/drivers/net/ethernet/8390/apne.c +++ b/drivers/net/ethernet/8390/apne.c @@ -212,7 +212,6 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr) int neX000, ctron; #endif static unsigned version_printed; - struct ei_device *ei_local = netdev_priv(dev); if ((apne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0)) netdev_info(dev, version); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 92a467ff4104..38fc794c1655 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -358,49 +358,47 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) cfg_idx = bnx2x_get_link_cfg_idx(bp); old_multi_phy_config = bp->link_params.multi_phy_config; - switch (cmd->port) { - case PORT_TP: - if (bp->port.supported[cfg_idx] & SUPPORTED_TP) - break; /* no port change */ - - if (!(bp->port.supported[0] & SUPPORTED_TP || - bp->port.supported[1] & SUPPORTED_TP)) { + if (cmd->port != bnx2x_get_port_type(bp)) { + switch (cmd->port) { + case PORT_TP: + if (!(bp->port.supported[0] & SUPPORTED_TP || + bp->port.supported[1] & SUPPORTED_TP)) { + DP(BNX2X_MSG_ETHTOOL, + "Unsupported port type\n"); + return -EINVAL; + } + bp->link_params.multi_phy_config &= + ~PORT_HW_CFG_PHY_SELECTION_MASK; + if (bp->link_params.multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED) + bp->link_params.multi_phy_config |= + PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; + else + bp->link_params.multi_phy_config |= + PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; + break; + case PORT_FIBRE: + case PORT_DA: + if (!(bp->port.supported[0] & SUPPORTED_FIBRE || + bp->port.supported[1] & SUPPORTED_FIBRE)) { + DP(BNX2X_MSG_ETHTOOL, + "Unsupported port type\n"); + return -EINVAL; + } + bp->link_params.multi_phy_config &= + ~PORT_HW_CFG_PHY_SELECTION_MASK; + if (bp->link_params.multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED) + bp->link_params.multi_phy_config |= + PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; + else + bp->link_params.multi_phy_config |= + PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; + break; + default: DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n"); return -EINVAL; } - bp->link_params.multi_phy_config &= - 
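Both branches of the reworked bnx2x_set_settings() above use the same idiom: clear the PHY-selection field of multi_phy_config, then pick the first or second PHY depending on whether the board reports its PHYs as swapped. A hedged sketch of that selection as a standalone helper; the helper name and the EX_* constants are invented for illustration (the driver uses the PORT_HW_CFG_* definitions, whose actual values may differ):

#include <linux/types.h>

#define EX_PHY_SELECTION_MASK		0x00000700	/* illustrative values */
#define EX_PHY_SELECTION_FIRST_PHY	0x00000100
#define EX_PHY_SELECTION_SECOND_PHY	0x00000200
#define EX_PHY_SWAPPED_ENABLED		0x00000800

/*
 * Pick the PHY that serves the requested media.  'normally_second' is true
 * for media that the second PHY drives in the default (non-swapped) layout,
 * e.g. fibre/DA on a dual-media board; if the PHYs are swapped, the meaning
 * of first and second flips.
 */
static u32 ex_select_phy(u32 multi_phy_config, bool normally_second)
{
	bool swapped = multi_phy_config & EX_PHY_SWAPPED_ENABLED;

	multi_phy_config &= ~EX_PHY_SELECTION_MASK;
	if (normally_second != swapped)
		multi_phy_config |= EX_PHY_SELECTION_SECOND_PHY;
	else
		multi_phy_config |= EX_PHY_SELECTION_FIRST_PHY;

	return multi_phy_config;
}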
~PORT_HW_CFG_PHY_SELECTION_MASK; - if (bp->link_params.multi_phy_config & - PORT_HW_CFG_PHY_SWAPPED_ENABLED) - bp->link_params.multi_phy_config |= - PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; - else - bp->link_params.multi_phy_config |= - PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; - break; - case PORT_FIBRE: - case PORT_DA: - if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE) - break; /* no port change */ - - if (!(bp->port.supported[0] & SUPPORTED_FIBRE || - bp->port.supported[1] & SUPPORTED_FIBRE)) { - DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n"); - return -EINVAL; - } - bp->link_params.multi_phy_config &= - ~PORT_HW_CFG_PHY_SELECTION_MASK; - if (bp->link_params.multi_phy_config & - PORT_HW_CFG_PHY_SWAPPED_ENABLED) - bp->link_params.multi_phy_config |= - PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; - else - bp->link_params.multi_phy_config |= - PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; - break; - default: - DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n"); - return -EINVAL; } /* Save new config in case command complete successfully */ new_multi_phy_config = bp->link_params.multi_phy_config; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e118a3ec62bc..c9c445e7b4a5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -13102,9 +13102,9 @@ static void __bnx2x_remove(struct pci_dev *pdev, if (atomic_read(&pdev->enable_cnt) == 1) pci_release_regions(pdev); - } - pci_disable_device(pdev); + pci_disable_device(pdev); + } } static void bnx2x_remove_one(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a4b940862b83..b901371ca361 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -4440,9 +4440,10 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf, /* Check if APP Table has changed */ if (memcmp(&new_cfg->app, &old_cfg->app, - sizeof(new_cfg->app))) + sizeof(new_cfg->app))) { need_reconfig = true; dev_info(&pf->pdev->dev, "APP Table change detected.\n"); + } return need_reconfig; } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 6509935d145e..55a37ae11440 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -5020,6 +5020,8 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } + netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT); + err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "cannot register net device\n"); @@ -5028,8 +5030,6 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netif_carrier_off(dev); - netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT); - sky2_show_addr(dev); if (hw->ports > 1) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 30874cda8476..54ebf300332a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -683,12 +683,17 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup) adapter->ahw->linkup = 0; netif_carrier_off(netdev); } else if (!adapter->ahw->linkup && linkup) { - /* Do not advertise Link up if the port is in loopback mode */ - if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) + adapter->ahw->linkup = 1; + + /* Do not advertise Link up to the stack if device + * is in loopback mode + */ + if 
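The i40e_dcb_need_reconfig() hunk above is a classic missing-braces fix: without braces only the first statement after the if is conditional, so the "APP Table change detected" message was printed on every call, whether or not the table changed. A minimal standalone illustration of the pitfall and the corrected form (not the driver code):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ex_cfg { char app[16]; };

static bool ex_need_reconfig(const struct ex_cfg *new_cfg,
			     const struct ex_cfg *old_cfg)
{
	bool reconfig = false;

	/*
	 * Buggy form:
	 *
	 *	if (memcmp(new_cfg->app, old_cfg->app, sizeof(new_cfg->app)))
	 *		reconfig = true;
	 *		printf("APP table change detected\n");	// always runs
	 *
	 * The indentation suggests both lines are guarded, but only the
	 * first one is.  Braces make the intended scope explicit:
	 */
	if (memcmp(new_cfg->app, old_cfg->app, sizeof(new_cfg->app))) {
		reconfig = true;
		printf("APP table change detected\n");
	}

	return reconfig;
}

int main(void)
{
	struct ex_cfg a = { "old" }, b = { "old" };

	return ex_need_reconfig(&a, &b) ? 1 : 0;	/* 0: tables identical */
}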
(qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) { + netdev_info(netdev, "NIC Link is up for loopback test\n"); return; + } netdev_info(netdev, "NIC Link is up\n"); - adapter->ahw->linkup = 1; netif_carrier_on(netdev); } } @@ -1150,13 +1155,13 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter, u16 lro_length, length, data_offset, t_vid, vid = 0xffff; u32 seq_number; - if (unlikely(ring > adapter->max_rds_rings)) + if (unlikely(ring >= adapter->max_rds_rings)) return NULL; rds_ring = &recv_ctx->rds_rings[ring]; index = qlcnic_get_lro_sts_refhandle(sts_data0); - if (unlikely(index > rds_ring->num_desc)) + if (unlikely(index >= rds_ring->num_desc)) return NULL; buffer = &rds_ring->rx_buf_arr[index]; @@ -1662,13 +1667,13 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter, u16 vid = 0xffff; int err; - if (unlikely(ring > adapter->max_rds_rings)) + if (unlikely(ring >= adapter->max_rds_rings)) return NULL; rds_ring = &recv_ctx->rds_rings[ring]; index = qlcnic_83xx_hndl(sts_data[0]); - if (unlikely(index > rds_ring->num_desc)) + if (unlikely(index >= rds_ring->num_desc)) return NULL; buffer = &rds_ring->rx_buf_arr[index]; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 1f79d47c45fa..ba78c7481fa3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -1837,6 +1837,7 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) qlcnic_linkevent_request(adapter, 1); adapter->ahw->reset_context = 0; + netif_tx_start_all_queues(netdev); return 0; } @@ -2704,14 +2705,8 @@ static int qlcnic_open(struct net_device *netdev) err = __qlcnic_up(adapter, netdev); if (err) - goto err_out; + qlcnic_detach(adapter); - netif_tx_start_all_queues(netdev); - - return 0; - -err_out: - qlcnic_detach(adapter); return err; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 17a1ca2050f4..0638c1810d54 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -448,8 +448,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter, return 0; } -static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter, - struct qlcnic_info *info) +static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_cmd_args cmd; @@ -495,10 +494,6 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter) if (err) return -EIO; - err = qlcnic_sriov_get_vf_acl(adapter, &nic_info); - if (err) - return err; - if (qlcnic_83xx_get_port_info(adapter)) return -EIO; @@ -555,6 +550,10 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter, if (err) goto err_out_send_channel_term; + err = qlcnic_sriov_get_vf_acl(adapter); + if (err) + goto err_out_send_channel_term; + err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac); if (err) goto err_out_send_channel_term; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d93aa87408c2..a2e7d2c96e36 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1524,9 +1524,9 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv) priv->dev->dev_addr, 0); if (!is_valid_ether_addr(priv->dev->dev_addr)) 
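The qlcnic_process_lro() and qlcnic_83xx_process_lro() hunks above tighten the index validation from '>' to '>=': for an array of N descriptors only indices 0..N-1 are valid, so a value equal to N must also be rejected, otherwise a hardware-supplied reference handle can address one element past the end of the ring. A small standalone sketch of the check:

#include <stdbool.h>
#include <stddef.h>

/*
 * Validate an index taken from an untrusted source (here, a hardware
 * status descriptor) before using it to address an array of 'count' slots.
 */
static bool ex_index_is_valid(size_t index, size_t count)
{
	/* index == count is already out of bounds */
	return index < count;
}

/* Example use: refuse the lookup instead of reading rx_buf[count]. */
static const int *ex_lookup(const int *rx_buf, size_t count, size_t index)
{
	if (!ex_index_is_valid(index, count))
		return NULL;
	return &rx_buf[index];
}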
eth_hw_addr_random(priv->dev); + pr_info("%s: device MAC address %pM\n", priv->dev->name, + priv->dev->dev_addr); } - pr_warn("%s: device MAC address %pM\n", priv->dev->name, - priv->dev->dev_addr); } /** @@ -1635,7 +1635,7 @@ static int stmmac_hw_setup(struct net_device *dev) stmmac_mmc_setup(priv); ret = stmmac_init_ptp(priv); - if (ret) + if (ret && ret != -EOPNOTSUPP) pr_warn("%s: failed PTP initialisation\n", __func__); #ifdef CONFIG_STMMAC_DEBUG_FS diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index a26eecb1212c..7b594ce3f21d 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -462,7 +462,7 @@ struct nvsp_message { #define NETVSC_MTU 65536 -#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*2) /* 2MB */ +#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */ #define NETVSC_RECEIVE_BUFFER_ID 0xcafe diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 93b485b96249..03a2c6e17158 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -136,8 +136,7 @@ static int netvsc_destroy_recv_buf(struct netvsc_device *net_device) if (net_device->recv_buf) { /* Free up the receive buffer */ - free_pages((unsigned long)net_device->recv_buf, - get_order(net_device->recv_buf_size)); + vfree(net_device->recv_buf); net_device->recv_buf = NULL; } @@ -163,9 +162,7 @@ static int netvsc_init_recv_buf(struct hv_device *device) return -ENODEV; ndev = net_device->ndev; - net_device->recv_buf = - (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, - get_order(net_device->recv_buf_size)); + net_device->recv_buf = vzalloc(net_device->recv_buf_size); if (!net_device->recv_buf) { netdev_err(ndev, "unable to allocate receive " "buffer of size %d\n", net_device->recv_buf_size); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index bcf01af4b879..44c4db8450f0 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -69,6 +69,7 @@ #include #include #include +#include #include @@ -2228,6 +2229,27 @@ static int tun_chr_close(struct inode *inode, struct file *file) return 0; } +#ifdef CONFIG_PROC_FS +static int tun_chr_show_fdinfo(struct seq_file *m, struct file *f) +{ + struct tun_struct *tun; + struct ifreq ifr; + + memset(&ifr, 0, sizeof(ifr)); + + rtnl_lock(); + tun = tun_get(f); + if (tun) + tun_get_iff(current->nsproxy->net_ns, tun, &ifr); + rtnl_unlock(); + + if (tun) + tun_put(tun); + + return seq_printf(m, "iff:\t%s\n", ifr.ifr_name); +} +#endif + static const struct file_operations tun_fops = { .owner = THIS_MODULE, .llseek = no_llseek, @@ -2242,7 +2264,10 @@ static const struct file_operations tun_fops = { #endif .open = tun_chr_open, .release = tun_chr_close, - .fasync = tun_chr_fasync + .fasync = tun_chr_fasync, +#ifdef CONFIG_PROC_FS + .show_fdinfo = tun_chr_show_fdinfo, +#endif }; static struct miscdevice tun_miscdev = { diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index e955c5692986..ff04d4f95baa 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -117,6 +117,7 @@ struct netfront_info { } tx_skbs[NET_TX_RING_SIZE]; grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; + struct page *grant_tx_page[NET_TX_RING_SIZE]; unsigned tx_skb_freelist; spinlock_t rx_lock ____cacheline_aligned_in_smp; @@ -396,6 +397,7 @@ static void xennet_tx_buf_gc(struct net_device *dev) gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; + np->grant_tx_page[id] = NULL; 
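On the netvsc change above: growing the receive buffer to 16 MB makes __get_free_pages() a poor fit, since an order-12 physically contiguous allocation is likely to fail on a fragmented system, while vzalloc() only needs contiguous virtual address space; the GPADL code earlier in this patch was taught to translate such a buffer page by page. A hedged sketch of the allocation pattern (size and names are illustrative):

#include <linux/errno.h>
#include <linux/vmalloc.h>

#define EX_RECV_BUF_SIZE	(16 * 1024 * 1024)	/* 16 MB */

static void *ex_recv_buf;

static int ex_alloc_recv_buf(void)
{
	/* zeroed and virtually contiguous; no high-order page requirement */
	ex_recv_buf = vzalloc(EX_RECV_BUF_SIZE);
	if (!ex_recv_buf)
		return -ENOMEM;
	return 0;
}

static void ex_free_recv_buf(void)
{
	vfree(ex_recv_buf);	/* vfree(NULL) is a no-op */
	ex_recv_buf = NULL;
}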
add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id); dev_kfree_skb_irq(skb); } @@ -452,6 +454,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); + np->grant_tx_page[id] = virt_to_page(data); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; @@ -497,6 +500,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, np->xbdev->otherend_id, mfn, GNTMAP_readonly); + np->grant_tx_page[id] = page; tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = bytes; @@ -596,6 +600,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); + np->grant_tx_page[id] = virt_to_page(data); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; @@ -1085,10 +1090,11 @@ static void xennet_release_tx_bufs(struct netfront_info *np) continue; skb = np->tx_skbs[i].skb; - gnttab_end_foreign_access_ref(np->grant_tx_ref[i], - GNTMAP_readonly); - gnttab_release_grant_reference(&np->gref_tx_head, - np->grant_tx_ref[i]); + get_page(np->grant_tx_page[i]); + gnttab_end_foreign_access(np->grant_tx_ref[i], + GNTMAP_readonly, + (unsigned long)page_address(np->grant_tx_page[i])); + np->grant_tx_page[i] = NULL; np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i); dev_kfree_skb_irq(skb); @@ -1097,78 +1103,35 @@ static void xennet_release_tx_bufs(struct netfront_info *np) static void xennet_release_rx_bufs(struct netfront_info *np) { - struct mmu_update *mmu = np->rx_mmu; - struct multicall_entry *mcl = np->rx_mcl; - struct sk_buff_head free_list; - struct sk_buff *skb; - unsigned long mfn; - int xfer = 0, noxfer = 0, unused = 0; int id, ref; - dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n", - __func__); - return; - - skb_queue_head_init(&free_list); - spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { - ref = np->grant_rx_ref[id]; - if (ref == GRANT_INVALID_REF) { - unused++; - continue; - } + struct sk_buff *skb; + struct page *page; skb = np->rx_skbs[id]; - mfn = gnttab_end_foreign_transfer_ref(ref); - gnttab_release_grant_reference(&np->gref_rx_head, ref); + if (!skb) + continue; + + ref = np->grant_rx_ref[id]; + if (ref == GRANT_INVALID_REF) + continue; + + page = skb_frag_page(&skb_shinfo(skb)->frags[0]); + + /* gnttab_end_foreign_access() needs a page ref until + * foreign access is ended (which may be deferred). + */ + get_page(page); + gnttab_end_foreign_access(ref, 0, + (unsigned long)page_address(page)); np->grant_rx_ref[id] = GRANT_INVALID_REF; - if (0 == mfn) { - skb_shinfo(skb)->nr_frags = 0; - dev_kfree_skb(skb); - noxfer++; - continue; - } - - if (!xen_feature(XENFEAT_auto_translated_physmap)) { - /* Remap the page. 
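The xennet_release_tx_bufs() change above, and the rx-side rewrite that follows, depend on remembering which struct page sits behind each grant so the grant can be torn down with gnttab_end_foreign_access(). Because the backend may still have the page mapped, that call can defer freeing it, so the caller takes an extra page reference and passes the page address in rather than freeing the page itself. A rough sketch of the pattern, assuming the three-argument gnttab_end_foreign_access() used in this driver:

#include <linux/mm.h>
#include <xen/grant_table.h>

/*
 * Release one slot: end foreign access on the grant that covers 'page'.
 * The extra get_page() keeps the page alive while the remote end may still
 * hold a mapping; gnttab_end_foreign_access() frees it (via the address
 * passed in) once the grant is really gone, possibly at a later point.
 */
static void ex_release_granted_page(grant_ref_t ref, struct page *page)
{
	get_page(page);
	gnttab_end_foreign_access(ref, GNTMAP_readonly,
				  (unsigned long)page_address(page));
}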
*/ - const struct page *page = - skb_frag_page(&skb_shinfo(skb)->frags[0]); - unsigned long pfn = page_to_pfn(page); - void *vaddr = page_address(page); - - MULTI_update_va_mapping(mcl, (unsigned long)vaddr, - mfn_pte(mfn, PAGE_KERNEL), - 0); - mcl++; - mmu->ptr = ((u64)mfn << PAGE_SHIFT) - | MMU_MACHPHYS_UPDATE; - mmu->val = pfn; - mmu++; - - set_phys_to_machine(pfn, mfn); - } - __skb_queue_tail(&free_list, skb); - xfer++; + kfree_skb(skb); } - dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n", - __func__, xfer, noxfer, unused); - - if (xfer) { - if (!xen_feature(XENFEAT_auto_translated_physmap)) { - /* Do all the remapping work and M2P updates. */ - MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu, - NULL, DOMID_SELF); - mcl++; - HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl); - } - } - - __skb_queue_purge(&free_list); - spin_unlock_bh(&np->rx_lock); } @@ -1339,6 +1302,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; + np->grant_tx_page[i] = NULL; } /* A grant for every tx ring slot */ diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index ac0bdded060f..a0de045eb227 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -738,6 +738,8 @@ struct qeth_discipline { int (*freeze)(struct ccwgroup_device *); int (*thaw) (struct ccwgroup_device *); int (*restore)(struct ccwgroup_device *); + int (*control_event_handler)(struct qeth_card *card, + struct qeth_ipa_cmd *cmd); }; struct qeth_vlan_vid { @@ -948,13 +950,10 @@ int qeth_query_card_info(struct qeth_card *card, int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), void *reply_param); -void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd); -void qeth_bridgeport_query_support(struct qeth_card *card); int qeth_bridgeport_query_ports(struct qeth_card *card, enum qeth_sbp_roles *role, enum qeth_sbp_states *state); int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); int qeth_bridgeport_an_set(struct qeth_card *card, int enable); -void qeth_bridge_host_event(struct qeth_card *card, struct qeth_ipa_cmd *cmd); int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int); int qeth_get_elements_for_frags(struct sk_buff *); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index c05dacbf4e23..c3a83df07894 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -69,6 +69,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); struct workqueue_struct *qeth_wq; +EXPORT_SYMBOL_GPL(qeth_wq); static void qeth_close_dev_handler(struct work_struct *work) { @@ -616,15 +617,12 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, qeth_schedule_recovery(card); return NULL; case IPA_CMD_SETBRIDGEPORT: - if (cmd->data.sbp.hdr.command_code == - IPA_SBP_BRIDGE_PORT_STATE_CHANGE) { - qeth_bridge_state_change(card, cmd); - return NULL; - } else - return cmd; case IPA_CMD_ADDRESS_CHANGE_NOTIF: - qeth_bridge_host_event(card, cmd); - return NULL; + if (card->discipline->control_event_handler + (card, cmd)) + return cmd; + else + return NULL; case IPA_CMD_MODCCID: return cmd; 
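The qeth_check_ipa_data() change above replaces hard-coded layer-2 bridgeport handling with a per-discipline hook: the core asks the active discipline whether it consumed the control command (return 0) or wants it handed back (non-zero), so qeth_l3 can simply decline events that only make sense for qeth_l2. A stripped-down sketch of that ops-style dispatch, with simplified structures and invented names:

#include <linux/stddef.h>

struct ex_card;
struct ex_cmd { int command; };

struct ex_discipline {
	/* returns 0 if the command was consumed by the discipline */
	int (*control_event_handler)(struct ex_card *card, struct ex_cmd *cmd);
};

struct ex_card {
	const struct ex_discipline *discipline;
};

/* core-side dispatch: hand the event to whichever discipline is loaded */
static struct ex_cmd *ex_check_control_cmd(struct ex_card *card,
					    struct ex_cmd *cmd)
{
	if (card->discipline->control_event_handler(card, cmd))
		return cmd;	/* not consumed, keep processing it */
	return NULL;		/* consumed, nothing left to do */
}

/* layer-3 discipline: these events are never interesting, never consume */
static int ex_l3_control_event(struct ex_card *card, struct ex_cmd *cmd)
{
	return 1;
}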
case IPA_CMD_REGISTER_LOCAL_ADDR: @@ -4973,10 +4971,6 @@ retriable: qeth_query_setadapterparms(card); if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) qeth_query_setdiagass(card); - qeth_bridgeport_query_support(card); - if (card->options.sbp.supported_funcs) - dev_info(&card->gdev->dev, - "The device represents a HiperSockets Bridge Capable Port\n"); return 0; out: dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 914d2c121fd8..0710550093ce 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -33,6 +33,11 @@ static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *, unsigned long)); static void qeth_l2_set_multicast_list(struct net_device *); static int qeth_l2_recover(void *); +static void qeth_bridgeport_query_support(struct qeth_card *card); +static void qeth_bridge_state_change(struct qeth_card *card, + struct qeth_ipa_cmd *cmd); +static void qeth_bridge_host_event(struct qeth_card *card, + struct qeth_ipa_cmd *cmd); static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { @@ -989,6 +994,10 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) rc = -ENODEV; goto out_remove; } + qeth_bridgeport_query_support(card); + if (card->options.sbp.supported_funcs) + dev_info(&card->gdev->dev, + "The device represents a HiperSockets Bridge Capable Port\n"); qeth_trace_features(card); if (!card->dev && qeth_l2_setup_netdev(card)) { @@ -1233,6 +1242,26 @@ out: return rc; } +/* Returns zero if the command is successfully "consumed" */ +static int qeth_l2_control_event(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) +{ + switch (cmd->hdr.command) { + case IPA_CMD_SETBRIDGEPORT: + if (cmd->data.sbp.hdr.command_code == + IPA_SBP_BRIDGE_PORT_STATE_CHANGE) { + qeth_bridge_state_change(card, cmd); + return 0; + } else + return 1; + case IPA_CMD_ADDRESS_CHANGE_NOTIF: + qeth_bridge_host_event(card, cmd); + return 0; + default: + return 1; + } +} + struct qeth_discipline qeth_l2_discipline = { .start_poll = qeth_qdio_start_poll, .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, @@ -1246,6 +1275,7 @@ struct qeth_discipline qeth_l2_discipline = { .freeze = qeth_l2_pm_suspend, .thaw = qeth_l2_pm_resume, .restore = qeth_l2_pm_resume, + .control_event_handler = qeth_l2_control_event, }; EXPORT_SYMBOL_GPL(qeth_l2_discipline); @@ -1463,7 +1493,8 @@ static void qeth_bridge_state_change_worker(struct work_struct *work) kfree(data); } -void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd) +static void qeth_bridge_state_change(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) { struct qeth_sbp_state_change *qports = &cmd->data.sbp.data.state_change; @@ -1488,7 +1519,6 @@ void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd) sizeof(struct qeth_sbp_state_change) + extrasize); queue_work(qeth_wq, &data->worker); } -EXPORT_SYMBOL(qeth_bridge_state_change); struct qeth_bridge_host_data { struct work_struct worker; @@ -1528,7 +1558,8 @@ static void qeth_bridge_host_event_worker(struct work_struct *work) kfree(data); } -void qeth_bridge_host_event(struct qeth_card *card, struct qeth_ipa_cmd *cmd) +static void qeth_bridge_host_event(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) { struct qeth_ipacmd_addr_change *hostevs = &cmd->data.addrchange; @@ -1560,7 +1591,6 @@ void qeth_bridge_host_event(struct qeth_card *card, struct qeth_ipa_cmd *cmd) 
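qeth_bridge_state_change() and qeth_bridge_host_event() above, now private to qeth_l2, both use the same deferral idiom: the IPA command arrives in a context that must not block, so the handler copies the variable-length payload into a privately allocated work item and lets a workqueue worker do the real processing. A minimal sketch of that idiom, with invented names and queued on the system workqueue rather than the driver's own:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct ex_event_work {
	struct work_struct worker;
	size_t len;
	u8 payload[];			/* flexible array member */
};

static void ex_event_worker(struct work_struct *work)
{
	struct ex_event_work *data =
		container_of(work, struct ex_event_work, worker);

	/* ... process data->payload / data->len in process context ... */

	kfree(data);
}

/* called from a context that must not sleep: copy the payload and defer */
static int ex_defer_event(const void *payload, size_t len)
{
	struct ex_event_work *data;

	data = kzalloc(sizeof(*data) + len, GFP_ATOMIC);
	if (!data)
		return -ENOMEM;
	INIT_WORK(&data->worker, ex_event_worker);
	data->len = len;
	memcpy(data->payload, payload, len);
	queue_work(system_wq, &data->worker);
	return 0;
}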
sizeof(struct qeth_ipacmd_addr_change) + extrasize); queue_work(qeth_wq, &data->worker); } -EXPORT_SYMBOL(qeth_bridge_host_event); /* SETBRIDGEPORT support; sending commands */ @@ -1683,7 +1713,7 @@ static int qeth_bridgeport_query_support_cb(struct qeth_card *card, * Sets bitmask of supported setbridgeport subfunctions in the qeth_card * strucutre: card->options.sbp.supported_funcs. */ -void qeth_bridgeport_query_support(struct qeth_card *card) +static void qeth_bridgeport_query_support(struct qeth_card *card) { struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; @@ -1709,7 +1739,6 @@ void qeth_bridgeport_query_support(struct qeth_card *card) } card->options.sbp.supported_funcs = cbctl.data.supported; } -EXPORT_SYMBOL_GPL(qeth_bridgeport_query_support); static int qeth_bridgeport_query_ports_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index c1b0b2761f8d..0f430424c3b8 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -3593,6 +3593,13 @@ out: return rc; } +/* Returns zero if the command is successfully "consumed" */ +static int qeth_l3_control_event(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) +{ + return 1; +} + struct qeth_discipline qeth_l3_discipline = { .start_poll = qeth_qdio_start_poll, .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, @@ -3606,6 +3613,7 @@ struct qeth_discipline qeth_l3_discipline = { .freeze = qeth_l3_pm_suspend, .thaw = qeth_l3_pm_resume, .restore = qeth_l3_pm_resume, + .control_event_handler = qeth_l3_control_event, }; EXPORT_SYMBOL_GPL(qeth_l3_discipline); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 1f689e62e4cb..f589c9af8cbf 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2456,6 +2456,7 @@ void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); void skb_scrub_packet(struct sk_buff *skb, bool xnet); +unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); struct skb_checksum_ops { diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 8f519dbb358b..5976ef0846bd 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -47,6 +47,8 @@ #include #include #include +#include +#include #include #ifdef CONFIG_NET_CLS_ACT #include @@ -2119,7 +2121,7 @@ EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); /** * skb_zerocopy - Zero copy skb to skb * @to: destination buffer - * @source: source buffer + * @from: source buffer * @len: number of bytes to copy from source buffer * @hlen: size of linear headroom in destination buffer * @@ -3916,3 +3918,26 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) nf_reset_trace(skb); } EXPORT_SYMBOL_GPL(skb_scrub_packet); + +/** + * skb_gso_transport_seglen - Return length of individual segments of a gso packet + * + * @skb: GSO skb + * + * skb_gso_transport_seglen is used to determine the real size of the + * individual segments, including Layer4 headers (TCP/UDP). + * + * The MAC/L2 or network (IP, IPv6) headers are not accounted for. 
+ */ +unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) +{ + const struct skb_shared_info *shinfo = skb_shinfo(skb); + unsigned int hdr_len; + + if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) + hdr_len = tcp_hdrlen(skb); + else + hdr_len = sizeof(struct udphdr); + return hdr_len + shinfo->gso_size; +} +EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); diff --git a/net/ieee802154/6lowpan_iphc.c b/net/ieee802154/6lowpan_iphc.c index 083f905bf109..860aa2d445ba 100644 --- a/net/ieee802154/6lowpan_iphc.c +++ b/net/ieee802154/6lowpan_iphc.c @@ -678,7 +678,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, hc06_ptr += 3; } else { /* compress nothing */ - memcpy(hc06_ptr, &hdr, 4); + memcpy(hc06_ptr, hdr, 4); /* replace the top byte with new ECN | DSCP format */ *hc06_ptr = tmp; hc06_ptr += 4; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index e7a92fdb36f6..ec4f762efda5 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -178,7 +178,7 @@ static int ipgre_err(struct sk_buff *skb, u32 info, else itn = net_generic(net, ipgre_net_id); - iph = (const struct iphdr *)skb->data; + iph = (const struct iphdr *)(icmp_hdr(skb) + 1); t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags, iph->daddr, iph->saddr, tpi->key); diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 054a3e97d822..3d4da2c16b6a 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -314,7 +314,7 @@ static int ip_rcv_finish(struct sk_buff *skb) const struct iphdr *iph = ip_hdr(skb); struct rtable *rt; - if (sysctl_ip_early_demux && !skb_dst(skb)) { + if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) { const struct net_protocol *ipprot; int protocol = iph->protocol; diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index c0e3cb72ad70..bd28f386bd02 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -930,7 +931,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, } rtnl_unlock(); - return PTR_RET(itn->fb_tunnel_dev); + return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev); } EXPORT_SYMBOL_GPL(ip_tunnel_init_net); diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 302d6fb1ff2b..51d54dc376f3 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -49,7 +49,7 @@ int ip6_rcv_finish(struct sk_buff *skb) { - if (sysctl_ip_early_demux && !skb_dst(skb)) { + if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) { const struct inet6_protocol *ipprot; ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]); diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c index 2dae8a5df23f..94425e421213 100644 --- a/net/llc/llc_output.c +++ b/net/llc/llc_output.c @@ -43,7 +43,7 @@ int llc_mac_hdr_init(struct sk_buff *skb, rc = 0; break; default: - WARN(1, "device type not supported: %d\n", skb->dev->type); + break; } return rc; } diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c index 4106ca95ec86..7bf5b5b9e8b9 100644 --- a/net/rxrpc/ar-connection.c +++ b/net/rxrpc/ar-connection.c @@ -381,6 +381,8 @@ static int rxrpc_connect_exclusive(struct rxrpc_sock *rx, rxrpc_assign_connection_id(conn); rx->conn = conn; + } else { + spin_lock(&trans->client_lock); } /* we've got a connection with a free channel and we can now attach the diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c index 898492a8d61b..34b5490dde65 100644 --- a/net/rxrpc/ar-recvmsg.c +++ b/net/rxrpc/ar-recvmsg.c @@ -180,7 +180,8 @@ int rxrpc_recvmsg(struct 
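The ipgre_err() fix above is about where the offending packet lives inside an ICMP error: by the time the tunnel error handler runs, skb->data no longer points at the quoted inner IPv4 header, but skb->transport_header still points at the outer ICMP header, and RFC 792 places the quoted header immediately after it. A hedged sketch of recovering the inner header under that assumption (bounds/pskb_may_pull() checks omitted for brevity):

#include <linux/icmp.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

/*
 * Given an skb whose transport header points at an ICMPv4 error message
 * (destination unreachable, time exceeded, ...), return the IPv4 header
 * of the original packet quoted in the error: it starts right after the
 * 8-byte ICMP header, hence icmp_hdr(skb) + 1.
 */
static const struct iphdr *ex_icmp_inner_iph(const struct sk_buff *skb)
{
	return (const struct iphdr *)(icmp_hdr(skb) + 1);
}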
kiocb *iocb, struct socket *sock, if (copy > len - copied) copy = len - copied; - if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + if (skb->ip_summed == CHECKSUM_UNNECESSARY || + skb->ip_summed == CHECKSUM_PARTIAL) { ret = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copy); } else { @@ -353,6 +354,10 @@ csum_copy_error: if (continue_call) rxrpc_put_call(continue_call); rxrpc_kill_skb(skb); + if (!(flags & MSG_PEEK)) { + if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) + BUG(); + } skb_kill_datagram(&rx->sk, skb, flags); rxrpc_put_call(call); return -EAGAIN; diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index fbba5b0ec121..1cb413fead89 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -21,7 +21,6 @@ #include #include #include -#include /* Simple Token Bucket Filter. @@ -148,16 +147,10 @@ static u64 psched_ns_t2l(const struct psched_ratecfg *r, * Return length of individual segments of a gso packet, * including all headers (MAC, IP, TCP/UDP) */ -static unsigned int skb_gso_seglen(const struct sk_buff *skb) +static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) { unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); - const struct skb_shared_info *shinfo = skb_shinfo(skb); - - if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) - hdr_len += tcp_hdrlen(skb); - else - hdr_len += sizeof(struct udphdr); - return hdr_len + shinfo->gso_size; + return hdr_len + skb_gso_transport_seglen(skb); } /* GSO packet is too big, segment it so that tbf can transmit @@ -202,7 +195,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) int ret; if (qdisc_pkt_len(skb) > q->max_size) { - if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size) + if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size) return tbf_segment(skb, sch); return qdisc_reshape_fail(skb, sch); }
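To make the sch_tbf change above concrete: skb_gso_transport_seglen() returns the size of one segment from the transport header down, so the qdisc adds back the MAC-to-transport span to obtain the on-wire size it compares against max_size. For a typical TCP GSO packet with gso_size = 1460 and a 20-byte TCP header the transport-level segment is 1480 bytes; adding a 20-byte IPv4 header and a 14-byte Ethernet header gives the familiar 1514-byte frame. A sketch of that composition in the shape tbf_enqueue() uses it (simplified, not the qdisc code):

#include <linux/skbuff.h>

/*
 * Decide whether an over-sized packet can be rescued by software
 * segmentation: a GSO packet is acceptable if each resulting frame,
 * headers included, fits within the shaper's largest permitted packet.
 */
static bool ex_can_segment_to_fit(const struct sk_buff *skb,
				  unsigned int max_size)
{
	/* L2 header plus IP header(s): MAC-to-transport span */
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
	unsigned int seglen = hdr_len + skb_gso_transport_seglen(skb);

	return skb_is_gso(skb) && seglen <= max_size;
}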