Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Need to take mutex in ath9k_add_interface(), from Dan Carpenter.

 2) Fix mt76 build without CONFIG_LEDS_CLASS, from Arnd Bergmann.

 3) Fix socket wmem accounting in SCTP, from Xin Long.

 4) Fix failed resume crash in ena driver, from Arthur Kiyanovski.

 5) qed driver passes bytes instead of bits into second arg of
    bitmap_weight(), from Denis Bolotin. A sketch of the bug pattern
    follows this list.

 6) Fix reset deadlock in ibmvnic, from Juliet Kim.

 7) skb_scrub_packet() needs to scrub the fwd marks too, from Petr
    Machata.

 8) Make sure older TCP stacks see enough dup ACKs, and avoid doing SACK
    compression during this period, from Eric Dumazet.

 9) Add atomicity to SMC protocol cursor handling, from Ursula Braun.

10) Don't leave dangling error pointer if bpf_prog_add() fails in
    thunderx driver, from Lorenzo Bianconi. Also, when we unmap TSO
    headers, set sq->tso_hdrs to NULL.

11) Fix race condition over state variables in act_police, from Davide
    Caratti.

12) Disable guest csum in the presence of XDP in virtio_net, from Jason
    Wang.
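
The bitmap_weight() bug in 5) above is a recurring pattern worth a
self-contained userspace sketch. bitmap_weight() is reimplemented here
only to keep the sketch standalone; in the kernel it comes from
<linux/bitmap.h> and its second argument is a size in BITS, not bytes:

    #include <stdio.h>

    static unsigned int bitmap_weight(const unsigned long *bitmap,
                                      unsigned int nbits)
    {
        unsigned int i, w = 0;

        for (i = 0; i < nbits; i++)
            if (bitmap[i / (8 * sizeof(long))] &
                (1UL << (i % (8 * sizeof(long)))))
                w++;
        return w;
    }

    int main(void)
    {
        unsigned long pq_flags = 0x300; /* two flags set: bits 8 and 9 */

        /* buggy: sizeof() yields bytes (8), so bits 8..63 are ignored
         * and "multiple flags set" goes undetected -- prints 0 */
        printf("weight over %zu 'bits': %u\n", sizeof(pq_flags),
               bitmap_weight(&pq_flags, sizeof(pq_flags)));

        /* fixed, as in the qed patch: scale bytes to bits -- prints 2 */
        printf("weight over %zu bits:   %u\n", sizeof(pq_flags) * 8,
               bitmap_weight(&pq_flags, sizeof(pq_flags) * 8));
        return 0;
    }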

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (64 commits)
  net: gemini: Fix copy/paste error
  net: phy: mscc: fix deadlock in vsc85xx_default_config
  dt-bindings: dsa: Fix typo in "probed"
  net: thunderx: set tso_hdrs pointer to NULL in nicvf_free_snd_queue
  net: amd: add missing of_node_put()
  team: no need to do team_notify_peers or team_mcast_rejoin when disabling port
  virtio-net: fail XDP set if guest csum is negotiated
  virtio-net: disable guest csum during XDP set
  net/sched: act_police: add missing spinlock initialization
  net: don't keep lonely packets forever in the gro hash
  net/ipv6: re-do dad when interface has IFF_NOARP flag change
  packet: copy user buffers before orphan or clone
  ibmvnic: Update driver queues after change in ring size support
  ibmvnic: Fix RX queue buffer cleanup
  net: thunderx: set xdp_prog to NULL if bpf_prog_add fails
  net/dim: Update DIM start sample after each DIM iteration
  net: faraday: ftmac100: remove netif_running(netdev) check before disabling interrupts
  net/smc: use after free fix in smc_wr_tx_put_slot()
  net/smc: atomic SMCD cursor handling
  net/smc: add SMC-D shutdown signal
  ...
commit 857fa628bb
Author: Linus Torvalds
Date:   2018-11-24 09:19:38 -08:00

 67 files changed, 537 insertions(+), 335 deletions(-)

@ -7,7 +7,7 @@ limitations.
Current Binding
---------------
Switches are true Linux devices and can be probes by any means. Once
Switches are true Linux devices and can be probed by any means. Once
probed, they register to the DSA framework, passing a node
pointer. This node is expected to fulfil the following binding, and
may contain additional properties as required by the device it is

@ -180,6 +180,7 @@ F: drivers/net/hamradio/6pack.c
8169 10/100/1000 GIGABIT ETHERNET DRIVER
M: Realtek linux nic maintainers <nic_swsd@realtek.com>
M: Heiner Kallweit <hkallweit1@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/realtek/r8169.c
@ -5534,6 +5535,7 @@ F: net/bridge/
ETHERNET PHY LIBRARY
M: Andrew Lunn <andrew@lunn.ch>
M: Florian Fainelli <f.fainelli@gmail.com>
M: Heiner Kallweit <hkallweit1@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/sysfs-bus-mdio

@ -1848,6 +1848,8 @@ static void ena_down(struct ena_adapter *adapter)
rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
if (rc)
dev_err(&adapter->pdev->dev, "Device reset failed\n");
/* stop submitting admin commands on a device that was reset */
ena_com_set_admin_running_state(adapter->ena_dev, false);
}
ena_destroy_all_io_queues(adapter);
@ -1914,6 +1916,9 @@ static int ena_close(struct net_device *netdev)
netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
return 0;
if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
ena_down(adapter);
@ -2613,9 +2618,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
ena_down(adapter);
/* Stop the device from sending AENQ events (in case reset flag is set
* and device is up, ena_close already reset the device
* In case the reset flag is set and the device is up, ena_down()
* already perform the reset, so it can be skipped.
* and device is up, ena_down() already reset the device.
*/
if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
@ -2694,8 +2697,8 @@ err_device_destroy:
ena_com_abort_admin_commands(ena_dev);
ena_com_wait_for_abort_completion(ena_dev);
ena_com_admin_destroy(ena_dev);
ena_com_mmio_reg_read_request_destroy(ena_dev);
ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
ena_com_mmio_reg_read_request_destroy(ena_dev);
err:
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
@ -3452,6 +3455,8 @@ err_rss:
ena_com_rss_destroy(ena_dev);
err_free_msix:
ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
/* stop submitting admin commands on a device that was reset */
ena_com_set_admin_running_state(ena_dev, false);
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
err_worker_destroy:
@ -3498,18 +3503,12 @@ static void ena_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->reset_task);
unregister_netdev(netdev);
/* If the device is running then we want to make sure the device will be
* reset to make sure no more events will be issued by the device.
*/
if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
rtnl_lock();
ena_destroy_device(adapter, true);
rtnl_unlock();
unregister_netdev(netdev);
free_netdev(netdev);
ena_com_rss_destroy(ena_dev);

@ -45,7 +45,7 @@
#define DRV_MODULE_VER_MAJOR 2
#define DRV_MODULE_VER_MINOR 0
#define DRV_MODULE_VER_SUBMINOR 1
#define DRV_MODULE_VER_SUBMINOR 2
#define DRV_MODULE_NAME "ena"
#ifndef DRV_MODULE_VERSION

@ -1419,7 +1419,7 @@ static int sparc_lance_probe_one(struct platform_device *op,
prop = of_get_property(nd, "tpe-link-test?", NULL);
if (!prop)
goto no_link_test;
goto node_put;
if (strcmp(prop, "true")) {
printk(KERN_NOTICE "SunLance: warning: overriding option "
@ -1428,6 +1428,8 @@ static int sparc_lance_probe_one(struct platform_device *op,
"to ecd@skynet.be\n");
auxio_set_lte(AUXIO_LTE_ON);
}
node_put:
of_node_put(nd);
no_link_test:
lp->auto_select = 1;
lp->tpe = 0;

@ -12422,6 +12422,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
{
struct tg3 *tp = netdev_priv(dev);
int i, irq_sync = 0, err = 0;
bool reset_phy = false;
if ((ering->rx_pending > tp->rx_std_ring_mask) ||
(ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
@ -12453,7 +12454,13 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
err = tg3_restart_hw(tp, false);
/* Reset PHY to avoid PHY lock up */
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_asic_rev(tp) == ASIC_REV_5720)
reset_phy = true;
err = tg3_restart_hw(tp, reset_phy);
if (!err)
tg3_netif_start(tp);
}
@ -12487,6 +12494,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
{
struct tg3 *tp = netdev_priv(dev);
int err = 0;
bool reset_phy = false;
if (tp->link_config.autoneg == AUTONEG_ENABLE)
tg3_warn_mgmt_link_flap(tp);
@ -12556,7 +12564,13 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
err = tg3_restart_hw(tp, false);
/* Reset PHY to avoid PHY lock up */
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_asic_rev(tp) == ASIC_REV_5720)
reset_phy = true;
err = tg3_restart_hw(tp, reset_phy);
if (!err)
tg3_netif_start(tp);
}

@ -1784,6 +1784,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
bool if_up = netif_running(nic->netdev);
struct bpf_prog *old_prog;
bool bpf_attached = false;
int ret = 0;
/* For now just support only the usual MTU sized frames */
if (prog && (dev->mtu > 1500)) {
@ -1817,8 +1818,12 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
if (nic->xdp_prog) {
/* Attach BPF program */
nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
if (!IS_ERR(nic->xdp_prog))
if (!IS_ERR(nic->xdp_prog)) {
bpf_attached = true;
} else {
ret = PTR_ERR(nic->xdp_prog);
nic->xdp_prog = NULL;
}
}
/* Calculate Tx queues needed for XDP and network stack */
@ -1830,7 +1835,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
netif_trans_update(nic->netdev);
}
return 0;
return ret;
}
static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)

@ -585,10 +585,12 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
if (!sq->dmem.base)
return;
if (sq->tso_hdrs)
if (sq->tso_hdrs) {
dma_free_coherent(&nic->pdev->dev,
sq->dmem.q_len * TSO_HEADER_SIZE,
sq->tso_hdrs, sq->tso_hdrs_phys);
sq->tso_hdrs = NULL;
}
/* Free pending skbs in the queue */
smp_rmb();

@ -660,7 +660,7 @@ static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq,
u64_stats_update_begin(&port->tx_stats_syncp);
port->tx_frag_stats[nfrags]++;
u64_stats_update_end(&port->ir_stats_syncp);
u64_stats_update_end(&port->tx_stats_syncp);
}
}

@ -872,11 +872,10 @@ static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
struct net_device *netdev = dev_id;
struct ftmac100 *priv = netdev_priv(netdev);
if (likely(netif_running(netdev))) {
/* Disable interrupts for polling */
ftmac100_disable_all_int(priv);
if (likely(netif_running(netdev)))
napi_schedule(&priv->napi);
}
return IRQ_HANDLED;
}

@ -485,8 +485,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
for (j = 0; j < rx_pool->size; j++) {
if (rx_pool->rx_buff[j].skb) {
dev_kfree_skb_any(rx_pool->rx_buff[i].skb);
rx_pool->rx_buff[i].skb = NULL;
dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
rx_pool->rx_buff[j].skb = NULL;
}
}
@ -1103,20 +1103,15 @@ static int ibmvnic_open(struct net_device *netdev)
return 0;
}
mutex_lock(&adapter->reset_lock);
if (adapter->state != VNIC_CLOSED) {
rc = ibmvnic_login(netdev);
if (rc) {
mutex_unlock(&adapter->reset_lock);
if (rc)
return rc;
}
rc = init_resources(adapter);
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
release_resources(adapter);
mutex_unlock(&adapter->reset_lock);
return rc;
}
}
@ -1124,8 +1119,6 @@ static int ibmvnic_open(struct net_device *netdev)
rc = __ibmvnic_open(netdev);
netif_carrier_on(netdev);
mutex_unlock(&adapter->reset_lock);
return rc;
}
@ -1269,10 +1262,8 @@ static int ibmvnic_close(struct net_device *netdev)
return 0;
}
mutex_lock(&adapter->reset_lock);
rc = __ibmvnic_close(netdev);
ibmvnic_cleanup(netdev);
mutex_unlock(&adapter->reset_lock);
return rc;
}
@ -1746,6 +1737,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
struct ibmvnic_rwi *rwi, u32 reset_state)
{
u64 old_num_rx_queues, old_num_tx_queues;
u64 old_num_rx_slots, old_num_tx_slots;
struct net_device *netdev = adapter->netdev;
int i, rc;
@ -1757,6 +1749,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
ibmvnic_cleanup(netdev);
@ -1819,21 +1813,20 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (rc)
return rc;
} else if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues) {
adapter->map_id = 1;
adapter->req_tx_queues != old_num_tx_queues ||
adapter->req_rx_add_entries_per_subcrq !=
old_num_rx_slots ||
adapter->req_tx_entries_per_subcrq !=
old_num_tx_slots) {
release_rx_pools(adapter);
release_tx_pools(adapter);
rc = init_rx_pools(netdev);
if (rc)
return rc;
rc = init_tx_pools(netdev);
release_napi(adapter);
release_vpd_data(adapter);
rc = init_resources(adapter);
if (rc)
return rc;
release_napi(adapter);
rc = init_napi(adapter);
if (rc)
return rc;
} else {
rc = reset_tx_pools(adapter);
if (rc)
@ -1917,17 +1910,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
adapter->state = VNIC_PROBED;
return 0;
}
/* netif_set_real_num_xx_queues needs to take rtnl lock here
* unless wait_for_reset is set, in which case the rtnl lock
* has already been taken before initializing the reset
*/
if (!adapter->wait_for_reset) {
rtnl_lock();
rc = init_resources(adapter);
rtnl_unlock();
} else {
rc = init_resources(adapter);
}
if (rc)
return rc;
@ -1986,13 +1970,21 @@ static void __ibmvnic_reset(struct work_struct *work)
struct ibmvnic_rwi *rwi;
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
bool we_lock_rtnl = false;
u32 reset_state;
int rc = 0;
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
netdev = adapter->netdev;
mutex_lock(&adapter->reset_lock);
/* netif_set_real_num_xx_queues needs to take rtnl lock here
* unless wait_for_reset is set, in which case the rtnl lock
* has already been taken before initializing the reset
*/
if (!adapter->wait_for_reset) {
rtnl_lock();
we_lock_rtnl = true;
}
reset_state = adapter->state;
rwi = get_next_rwi(adapter);
@ -2020,12 +2012,11 @@ static void __ibmvnic_reset(struct work_struct *work)
if (rc) {
netdev_dbg(adapter->netdev, "Reset failed\n");
free_all_rwi(adapter);
mutex_unlock(&adapter->reset_lock);
return;
}
adapter->resetting = false;
mutex_unlock(&adapter->reset_lock);
if (we_lock_rtnl)
rtnl_unlock();
}
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
@ -4768,7 +4759,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
mutex_init(&adapter->reset_lock);
mutex_init(&adapter->rwi_lock);
adapter->resetting = false;
@ -4840,8 +4830,8 @@ static int ibmvnic_remove(struct vio_dev *dev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
adapter->state = VNIC_REMOVING;
unregister_netdev(netdev);
mutex_lock(&adapter->reset_lock);
rtnl_lock();
unregister_netdevice(netdev);
release_resources(adapter);
release_sub_crqs(adapter, 1);
@ -4852,7 +4842,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
adapter->state = VNIC_REMOVED;
mutex_unlock(&adapter->reset_lock);
rtnl_unlock();
device_remove_file(&dev->dev, &dev_attr_failover);
free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);

@ -1075,7 +1075,7 @@ struct ibmvnic_adapter {
struct tasklet_struct tasklet;
enum vnic_state state;
enum ibmvnic_reset_reason reset_reason;
struct mutex reset_lock, rwi_lock;
struct mutex rwi_lock;
struct list_head rwi_list;
struct work_struct ibmvnic_reset;
bool resetting;

@ -569,6 +569,7 @@ struct mlx5e_rq {
unsigned long state;
int ix;
unsigned int hw_mtu;
struct net_dim dim; /* Dynamic Interrupt Moderation */

@ -88,10 +88,8 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
*speed = mlx5e_port_ptys2speed(eth_proto_oper);
if (!(*speed)) {
mlx5_core_warn(mdev, "cannot get port speed\n");
if (!(*speed))
err = -EINVAL;
}
return err;
}
@ -258,7 +256,7 @@ static int mlx5e_fec_admin_field(u32 *pplm,
case 40000:
if (!write)
*fec_policy = MLX5_GET(pplm_reg, pplm,
fec_override_cap_10g_40g);
fec_override_admin_10g_40g);
else
MLX5_SET(pplm_reg, pplm,
fec_override_admin_10g_40g, *fec_policy);
@ -310,7 +308,7 @@ static int mlx5e_get_fec_cap_field(u32 *pplm,
case 10000:
case 40000:
*fec_cap = MLX5_GET(pplm_reg, pplm,
fec_override_admin_10g_40g);
fec_override_cap_10g_40g);
break;
case 25000:
*fec_cap = MLX5_GET(pplm_reg, pplm,
@ -394,12 +392,12 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
{
u8 fec_policy_nofec = BIT(MLX5E_FEC_NOFEC);
bool fec_mode_not_supp_in_speed = false;
u8 no_fec_policy = BIT(MLX5E_FEC_NOFEC);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
u32 current_fec_speed;
u8 fec_policy_auto = 0;
u8 fec_caps = 0;
int err;
int i;
@ -415,22 +413,18 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
if (err)
return err;
err = mlx5e_port_linkspeed(dev, &current_fec_speed);
if (err)
return err;
MLX5_SET(pplm_reg, out, local_port, 1);
memset(in, 0, sz);
MLX5_SET(pplm_reg, in, local_port, 1);
for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS && !!fec_policy; i++) {
for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS; i++) {
mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]);
/* policy supported for link speed */
if (!!(fec_caps & fec_policy)) {
mlx5e_fec_admin_field(in, &fec_policy, 1,
/* policy supported for link speed, or policy is auto */
if (fec_caps & fec_policy || fec_policy == fec_policy_auto) {
mlx5e_fec_admin_field(out, &fec_policy, 1,
fec_supported_speeds[i]);
} else {
if (fec_supported_speeds[i] == current_fec_speed)
return -EOPNOTSUPP;
mlx5e_fec_admin_field(in, &no_fec_policy, 1,
/* turn off FEC if supported. Else, leave it the same */
if (fec_caps & fec_policy_nofec)
mlx5e_fec_admin_field(out, &fec_policy_nofec, 1,
fec_supported_speeds[i]);
fec_mode_not_supp_in_speed = true;
}
@ -441,5 +435,5 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
"FEC policy 0x%x is not supported for some speeds",
fec_policy);
return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 1);
return mlx5_core_access_reg(dev, out, sz, out, sz, MLX5_REG_PPLM, 0, 1);
}

@ -130,8 +130,10 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
int err;
err = mlx5e_port_linkspeed(priv->mdev, &speed);
if (err)
if (err) {
mlx5_core_warn(priv->mdev, "cannot get port speed\n");
return 0;
}
xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;

@ -843,8 +843,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
Autoneg);
err = get_fec_supported_advertised(mdev, link_ksettings);
if (err)
if (get_fec_supported_advertised(mdev, link_ksettings))
netdev_dbg(netdev, "%s: FEC caps query failed: %d\n",
__func__, err);

@ -502,6 +502,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->channel = c;
rq->ix = c->ix;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->stats = &c->priv->channel_stats[c->ix].rq;
rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@ -1623,13 +1624,15 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
int err;
u32 i;
err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
if (err)
return err;
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
&cq->wq_ctrl);
if (err)
return err;
mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
mcq->cqe_sz = 64;
mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq->arm_db = cq->wq_ctrl.db.db + 1;
@ -1687,6 +1690,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
int eqn;
int err;
err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
if (err)
return err;
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
sizeof(u64) * cq->wq_ctrl.buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
@ -1700,8 +1707,6 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
@ -1921,6 +1926,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
int err;
int eqn;
err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
if (err)
return err;
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
@ -1937,7 +1946,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
c->irq_desc = irq_to_desc(irq);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
@ -3574,6 +3582,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
return 0;
}
#ifdef CONFIG_MLX5_ESWITCH
static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@ -3586,6 +3595,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
return 0;
}
#endif
static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
@ -3684,7 +3694,9 @@ static int mlx5e_set_features(struct net_device *netdev,
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
#ifdef CONFIG_MLX5_ESWITCH
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
#endif
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
@ -3755,10 +3767,11 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
}
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params);
u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
reset = reset && (ppw_old != ppw_new);
reset = reset && (is_linear || (ppw_old != ppw_new));
}
if (!reset) {
@ -4678,7 +4691,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
FT_CAP(modify_root) &&
FT_CAP(identified_miss_table_mode) &&
FT_CAP(flow_table_modify)) {
#ifdef CONFIG_MLX5_ESWITCH
netdev->hw_features |= NETIF_F_HW_TC;
#endif
#ifdef CONFIG_MLX5_EN_ARFS
netdev->hw_features |= NETIF_F_NTUPLE;
#endif
@ -5004,11 +5019,21 @@ err_free_netdev:
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
const struct mlx5e_profile *profile;
int max_nch;
int err;
profile = priv->profile;
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
/* max number of channels may have changed */
max_nch = mlx5e_get_max_num_channels(priv->mdev);
if (priv->channels.params.num_channels > max_nch) {
mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
priv->channels.params.num_channels = max_nch;
mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, max_nch);
}
err = profile->init_tx(priv);
if (err)
goto out;

@ -1104,6 +1104,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u32 frag_size;
bool consumed;
/* Check packet size. Note LRO doesn't use linear SKB */
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
rq->stats->oversize_pkts_sw_drop++;
return NULL;
}
va = page_address(di->page) + head_offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);

@ -98,18 +98,17 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
return 1;
}
#ifdef CONFIG_INET
/* loopback test */
#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN)
static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST";
#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
struct mlx5ehdr {
__be32 version;
__be64 magic;
char text[ETH_GSTRING_LEN];
};
#ifdef CONFIG_INET
/* loopback test */
#define MLX5E_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) +\
sizeof(struct udphdr) + sizeof(struct mlx5ehdr))
#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
{
struct sk_buff *skb = NULL;
@ -117,10 +116,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
struct ethhdr *ethh;
struct udphdr *udph;
struct iphdr *iph;
int datalen, iplen;
datalen = MLX5E_TEST_PKT_SIZE -
(sizeof(*ethh) + sizeof(*iph) + sizeof(*udph));
int iplen;
skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE);
if (!skb) {
@ -149,7 +145,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
/* Fill UDP header */
udph->source = htons(9);
udph->dest = htons(9); /* Discard Protocol */
udph->len = htons(datalen + sizeof(struct udphdr));
udph->len = htons(sizeof(struct mlx5ehdr) + sizeof(struct udphdr));
udph->check = 0;
/* Fill IP header */
@ -157,7 +153,8 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
iph->ttl = 32;
iph->version = 4;
iph->protocol = IPPROTO_UDP;
iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen;
iplen = sizeof(struct iphdr) + sizeof(struct udphdr) +
sizeof(struct mlx5ehdr);
iph->tot_len = htons(iplen);
iph->frag_off = 0;
iph->saddr = 0;
@ -170,9 +167,6 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
mlxh = skb_put(skb, sizeof(*mlxh));
mlxh->version = 0;
mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text));
datalen -= sizeof(*mlxh);
skb_put_zero(skb, datalen);
skb->csum = 0;
skb->ip_summed = CHECKSUM_PARTIAL;

@ -83,6 +83,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
@ -161,6 +162,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_wqe_err += rq_stats->wqe_err;
s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@ -1189,6 +1191,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },

@ -96,6 +96,7 @@ struct mlx5e_sw_stats {
u64 rx_wqe_err;
u64 rx_mpwqe_filler_cqes;
u64 rx_mpwqe_filler_strides;
u64 rx_oversize_pkts_sw_drop;
u64 rx_buff_alloc_err;
u64 rx_cqe_compress_blks;
u64 rx_cqe_compress_pkts;
@ -193,6 +194,7 @@ struct mlx5e_rq_stats {
u64 wqe_err;
u64 mpwqe_filler_cqes;
u64 mpwqe_filler_strides;
u64 oversize_pkts_sw_drop;
u64 buff_alloc_err;
u64 cqe_compress_blks;
u64 cqe_compress_pkts;

@ -1447,31 +1447,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
inner_headers);
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_dissector_key_eth_addrs *key =
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_dissector_key_basic *key =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS,
FLOW_DISSECTOR_KEY_BASIC,
f->key);
struct flow_dissector_key_eth_addrs *mask =
struct flow_dissector_key_basic *mask =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS,
FLOW_DISSECTOR_KEY_BASIC,
f->mask);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
ntohs(mask->n_proto));
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
ntohs(key->n_proto));
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dmac_47_16),
mask->dst);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dmac_47_16),
key->dst);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
smac_47_16),
mask->src);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
smac_47_16),
key->src);
if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
if (mask->n_proto)
*match_level = MLX5_MATCH_L2;
}
@ -1505,9 +1495,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*match_level = MLX5_MATCH_L2;
}
} else {
} else if (*match_level != MLX5_MATCH_NONE) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
*match_level = MLX5_MATCH_L2;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@ -1545,21 +1536,31 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_dissector_key_basic *key =
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_dissector_key_eth_addrs *key =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_BASIC,
FLOW_DISSECTOR_KEY_ETH_ADDRS,
f->key);
struct flow_dissector_key_basic *mask =
struct flow_dissector_key_eth_addrs *mask =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_BASIC,
FLOW_DISSECTOR_KEY_ETH_ADDRS,
f->mask);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
ntohs(mask->n_proto));
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
ntohs(key->n_proto));
if (mask->n_proto)
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dmac_47_16),
mask->dst);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dmac_47_16),
key->dst);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
smac_47_16),
mask->src);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
smac_47_16),
key->src);
if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
*match_level = MLX5_MATCH_L2;
}
@ -1586,10 +1587,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
/* the HW doesn't need L3 inline to match on frag=no */
if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
*match_level = MLX5_INLINE_MODE_L2;
*match_level = MLX5_MATCH_L2;
/* *** L2 attributes parsing up to here *** */
else
*match_level = MLX5_INLINE_MODE_IP;
*match_level = MLX5_MATCH_L3;
}
}
@ -2979,7 +2980,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
return -EOPNOTSUPP;
if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
"current firmware doesn't support split rule for port mirroring");
netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");

@ -83,8 +83,14 @@ struct mlx5_fpga_ipsec_rule {
};
static const struct rhashtable_params rhash_sa = {
.key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
/* Keep out "cmd" field from the key as it's
* value is not constant during the lifetime
* of the key object.
*/
.key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
.head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
.automatic_shrinking = true,
.min_size = 1,
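
The rhashtable change above excludes the mutable "cmd" word from the
hash key by advancing key_offset past it and shrinking key_len by its
size; note this arithmetic only works because cmd is the first field of
hw_sa. A minimal sketch with made-up struct layouts (sa_v1/sa_ctx are
illustrative, not the kernel definitions):

    #include <stddef.h>
    #include <stdio.h>

    #define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

    struct sa_v1  { unsigned int cmd; unsigned char sa[48]; }; /* cmd mutates */
    struct sa_ctx { struct sa_v1 hw_sa; int hash; };

    int main(void)
    {
        /* hash over hw_sa minus its leading, mutable cmd field */
        size_t key_len = FIELD_SIZEOF(struct sa_ctx, hw_sa) -
                         FIELD_SIZEOF(struct sa_v1, cmd);
        size_t key_off = offsetof(struct sa_ctx, hw_sa) +
                         FIELD_SIZEOF(struct sa_v1, cmd);

        printf("key: %zu bytes at offset %zu\n", key_len, key_off);
        return 0;
    }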

@ -560,9 +560,9 @@ static int mlx5i_close(struct net_device *netdev)
netif_carrier_off(epriv->netdev);
mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
mlx5i_uninit_underlay_qp(epriv);
mlx5e_deactivate_priv_channels(epriv);
mlx5e_close_channels(&epriv->channels);
mlx5i_uninit_underlay_qp(epriv);
unlock:
mutex_unlock(&epriv->state_lock);
return 0;

@ -485,8 +485,16 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
/* Can't have multiple flags set here */
if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1)
if (bitmap_weight((unsigned long *)&pq_flags,
sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
goto err;
}
if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
goto err;
}
switch (pq_flags) {
case PQ_FLAGS_RLS:
@ -510,8 +518,7 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
}
err:
DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
return NULL;
return &qm_info->start_pq;
}
/* save pq index in qm info */
@ -535,20 +542,32 @@ u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
{
u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
if (max_tc == 0) {
DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
PQ_FLAGS_MCOS);
return p_hwfn->qm_info.start_pq;
}
if (tc > max_tc)
DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
}
u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
{
u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
if (max_vf == 0) {
DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
PQ_FLAGS_VFS);
return p_hwfn->qm_info.start_pq;
}
if (vf > max_vf)
DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
}
u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc)

@ -810,17 +810,13 @@ static int vsc85xx_default_config(struct phy_device *phydev)
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
mutex_lock(&phydev->lock);
rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
if (rc < 0)
goto out_unlock;
reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL);
reg_val &= ~(RGMII_RX_CLK_DELAY_MASK);
reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS);
phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val);
reg_val = RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS;
rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
MSCC_PHY_RGMII_CNTL, RGMII_RX_CLK_DELAY_MASK,
reg_val);
out_unlock:
rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
mutex_unlock(&phydev->lock);
return rc;

@ -985,8 +985,6 @@ static void team_port_disable(struct team *team,
team->en_port_count--;
team_queue_override_port_del(team, port);
team_adjust_ops(team);
team_notify_peers(team);
team_mcast_rejoin(team);
team_lower_state_changed(port);
}

@ -70,7 +70,8 @@ static const unsigned long guest_offloads[] = {
VIRTIO_NET_F_GUEST_TSO4,
VIRTIO_NET_F_GUEST_TSO6,
VIRTIO_NET_F_GUEST_ECN,
VIRTIO_NET_F_GUEST_UFO
VIRTIO_NET_F_GUEST_UFO,
VIRTIO_NET_F_GUEST_CSUM
};
struct virtnet_stat_desc {
@ -2334,9 +2335,6 @@ static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
if (!vi->guest_offloads)
return 0;
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM;
return virtnet_set_guest_offloads(vi, offloads);
}
@ -2346,8 +2344,6 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
if (!vi->guest_offloads)
return 0;
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM;
return virtnet_set_guest_offloads(vi, offloads);
}
@ -2365,8 +2361,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
&& (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) {
NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first");
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
return -EOPNOTSUPP;
}

@ -6867,7 +6867,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 bitmap;
if (drop) {
if (vif->type == NL80211_IFTYPE_STATION) {
if (vif && vif->type == NL80211_IFTYPE_STATION) {
bitmap = ~(1 << WMI_MGMT_TID);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->vdev_type == WMI_VDEV_TYPE_STA)

@ -1251,6 +1251,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
struct ath_vif *avp = (void *)vif->drv_priv;
struct ath_node *an = &avp->mcast_node;
mutex_lock(&sc->mutex);
if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
if (sc->cur_chan->nvifs >= 1) {
mutex_unlock(&sc->mutex);
@ -1259,8 +1260,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
sc->tx99_vif = vif;
}
mutex_lock(&sc->mutex);
ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
sc->cur_chan->nvifs++;

@ -6005,7 +6005,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
* for subsequent chanspecs.
*/
channel->flags = IEEE80211_CHAN_NO_HT40 |
IEEE80211_CHAN_NO_80MHZ;
IEEE80211_CHAN_NO_80MHZ |
IEEE80211_CHAN_NO_160MHZ;
ch.bw = BRCMU_CHAN_BW_20;
cfg->d11inf.encchspec(&ch);
chaninfo = ch.chspec;

@ -193,6 +193,9 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
}
break;
case BRCMU_CHSPEC_D11AC_BW_160:
ch->bw = BRCMU_CHAN_BW_160;
ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
BRCMU_CHSPEC_D11AC_SB_SHIFT);
switch (ch->sb) {
case BRCMU_CHAN_SB_LLL:
ch->control_ch_num -= CH_70MHZ_APART;

@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -26,6 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -81,7 +83,7 @@
#define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2)
#define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \
ACPI_SAR_TABLE_SIZE + 3)
#define ACPI_WGDS_WIFI_DATA_SIZE 18
#define ACPI_WGDS_WIFI_DATA_SIZE 19
#define ACPI_WRDD_WIFI_DATA_SIZE 2
#define ACPI_SPLC_WIFI_DATA_SIZE 2

@ -154,7 +154,11 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
struct dentry *dbgfs_dir);
void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt);
static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt)
{
kfree(fwrt->dump.d3_debug_data);
fwrt->dump.d3_debug_data = NULL;
}
void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt);

@ -893,7 +893,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE);
ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE);
BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
@ -928,6 +928,11 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
return -ENOENT;
}
static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
{
return -ENOENT;
}
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
return 0;
@ -954,8 +959,11 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm,
"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
ret);
/* if not available, don't fail and don't bother with EWRD */
return 0;
/*
* If not available, don't fail and don't bother with EWRD.
* Return 1 to tell that we can't use WGDS either.
*/
return 1;
}
ret = iwl_mvm_sar_get_ewrd_table(mvm);
@ -968,9 +976,13 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
/* choose profile 1 (WRDS) as default for both chains */
ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
/* if we don't have profile 0 from BIOS, just skip it */
/*
* If we don't have profile 0 from BIOS, just skip it. This
* means that SAR Geo will not be enabled either, even if we
* have other valid profiles.
*/
if (ret == -ENOENT)
return 0;
return 1;
return ret;
}
@ -1168,11 +1180,19 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
ret = iwl_mvm_sar_init(mvm);
if (ret)
goto error;
if (ret == 0) {
ret = iwl_mvm_sar_geo_init(mvm);
if (ret)
} else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) {
/*
* If basic SAR is not available, we check for WGDS,
* which should *not* be available either. If it is
* available, issue an error, because we can't use SAR
* Geo without basic SAR.
*/
IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
}
if (ret < 0)
goto error;
iwl_mvm_leds_sync(mvm);

@ -301,8 +301,12 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
goto out;
}
if (changed)
*changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
if (changed) {
u32 status = le32_to_cpu(resp->status);
*changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
status == MCC_RESP_ILLEGAL);
}
regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
__le32_to_cpu(resp->n_channels),
@ -4444,10 +4448,6 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return;
/* if beacon filtering isn't on mac80211 does it anyway */
if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
return;

@ -539,9 +539,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
}
IWL_DEBUG_LAR(mvm,
"MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
status, mcc, mcc >> 8, mcc & 0xff,
!!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);
"MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n",
status, mcc, mcc >> 8, mcc & 0xff, n_channels);
exit:
iwl_free_resp(&cmd);

@ -858,6 +858,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_thermal_exit(mvm);
out_free:
iwl_fw_flush_dump(&mvm->fwrt);
iwl_fw_runtime_free(&mvm->fwrt);
if (iwlmvm_mod_params.init_dbg)
return op_mode;
@ -910,6 +911,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
iwl_mvm_tof_clean(mvm);
iwl_fw_runtime_free(&mvm->fwrt);
mutex_destroy(&mvm->mutex);
mutex_destroy(&mvm->d0i3_suspend_mutex);

@ -1,6 +1,12 @@
config MT76_CORE
tristate
config MT76_LEDS
bool
depends on MT76_CORE
depends on LEDS_CLASS=y || MT76_CORE=LEDS_CLASS
default y
config MT76_USB
tristate
depends on MT76_CORE

@ -345,9 +345,11 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
mt76_check_sband(dev, NL80211_BAND_2GHZ);
mt76_check_sband(dev, NL80211_BAND_5GHZ);
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
ret = mt76_led_init(dev);
if (ret)
return ret;
}
return ieee80211_register_hw(hw);
}

@ -71,7 +71,6 @@ struct mt76x02_dev {
struct mac_address macaddr_list[8];
struct mutex phy_mutex;
struct mutex mutex;
u8 txdone_seq;
DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);

@ -507,8 +507,10 @@ int mt76x2_register_device(struct mt76x02_dev *dev)
mt76x2_dfs_init_detector(dev);
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
}
ret = mt76_register_device(&dev->mt76, true, mt76x02_rates,
ARRAY_SIZE(mt76x02_rates));

@ -272,9 +272,9 @@ mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
if (val != ~0 && val > 0xffff)
return -EINVAL;
mutex_lock(&dev->mutex);
mutex_lock(&dev->mt76.mutex);
mt76x2_mac_set_tx_protection(dev, val);
mutex_unlock(&dev->mutex);
mutex_unlock(&dev->mt76.mutex);
return 0;
}

@ -285,7 +285,7 @@ static int wl1271_probe(struct sdio_func *func,
struct resource res[2];
mmc_pm_flag_t mmcflags;
int ret = -ENOMEM;
int irq, wakeirq;
int irq, wakeirq, num_irqs;
const char *chip_family;
/* We are only able to handle the wlan function */
@ -353,12 +353,17 @@ static int wl1271_probe(struct sdio_func *func,
irqd_get_trigger_type(irq_get_irq_data(irq));
res[0].name = "irq";
if (wakeirq > 0) {
res[1].start = wakeirq;
res[1].flags = IORESOURCE_IRQ |
irqd_get_trigger_type(irq_get_irq_data(wakeirq));
res[1].name = "wakeirq";
ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
num_irqs = 2;
} else {
num_irqs = 1;
}
ret = platform_device_add_resources(glue->core, res, num_irqs);
if (ret) {
dev_err(glue->dev, "can't add resources\n");
goto out_dev_put;

@ -406,6 +406,8 @@ static inline void net_dim(struct net_dim *dim,
}
/* fall through */
case NET_DIM_START_MEASURE:
net_dim_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr,
&dim->start_sample);
dim->state = NET_DIM_MEASURE_IN_PROGRESS;
break;
case NET_DIM_APPLY_NEW_PROFILE:

@ -1326,6 +1326,22 @@ static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
}
}
static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
{
skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
}
static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
{
return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
}
static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
{
return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
}
/* Release a reference on a zerocopy structure */
static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
{
@ -1335,7 +1351,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
if (uarg->callback == sock_zerocopy_callback) {
uarg->zerocopy = uarg->zerocopy && zerocopy;
sock_zerocopy_put(uarg);
} else {
} else if (!skb_zcopy_is_nouarg(skb)) {
uarg->callback(uarg, zerocopy);
}
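
The skb_zcopy_*_nouarg() helpers added above rely on pointer tagging:
destructor_arg holds a pointer with alignment greater than one, so its
low bit is always zero and can carry a "no ubuf_info here" flag. A
standalone userspace sketch of the same trick:

    #include <stdint.h>
    #include <stdio.h>

    static void *tag_ptr(void *p)   { return (void *)((uintptr_t)p | 0x1UL); }
    static int   is_tagged(void *p) { return (uintptr_t)p & 0x1UL; }
    static void *untag_ptr(void *p) { return (void *)((uintptr_t)p & ~0x1UL); }

    int main(void)
    {
        int x = 42;
        void *stored = tag_ptr(&x);       /* as skb_zcopy_set_nouarg() */

        if (is_tagged(stored))            /* as skb_zcopy_is_nouarg() */
            printf("%d\n", *(int *)untag_ptr(stored)); /* prints 42 */
        return 0;
    }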

@ -196,6 +196,7 @@ struct tcp_sock {
u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
u32 compressed_ack_rcv_nxt;
u32 tsoffset; /* timestamp offset */

@ -608,4 +608,16 @@ static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
SCTP_DEFAULT_MINSEGMENT));
}
static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
{
__u32 pmtu = sctp_dst_mtu(t->dst);
if (t->pathmtu == pmtu)
return true;
t->pathmtu = pmtu;
return false;
}
#endif /* __net_sctp_h__ */

@ -5970,11 +5970,14 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
if (work_done)
timeout = n->dev->gro_flush_timeout;
/* When the NAPI instance uses a timeout and keeps postponing
* it, we need to bound somehow the time packets are kept in
* the GRO layer
*/
napi_gro_flush(n, !!timeout);
if (timeout)
hrtimer_start(&n->timer, ns_to_ktime(timeout),
HRTIMER_MODE_REL_PINNED);
else
napi_gro_flush(n, false);
}
if (unlikely(!list_empty(&n->poll_list))) {
/* If n->poll_list is not empty, we need to mask irqs */

@ -4854,6 +4854,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
nf_reset(skb);
nf_reset_trace(skb);
#ifdef CONFIG_NET_SWITCHDEV
skb->offload_fwd_mark = 0;
skb->offload_mr_fwd_mark = 0;
#endif
if (!xnet)
return;

@ -4268,7 +4268,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
* If the sack array is full, forget about the last one.
*/
if (this_sack >= TCP_NUM_SACKS) {
if (tp->compressed_ack)
if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
tcp_send_ack(sk);
this_sack--;
tp->rx_opt.num_sacks--;
@ -4363,6 +4363,7 @@ static bool tcp_try_coalesce(struct sock *sk,
if (TCP_SKB_CB(from)->has_rxtstamp) {
TCP_SKB_CB(to)->has_rxtstamp = true;
to->tstamp = from->tstamp;
skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp;
}
return true;
@ -5188,7 +5189,17 @@ send_now:
if (!tcp_is_sack(tp) ||
tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
goto send_now;
tp->compressed_ack++;
if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
tp->compressed_ack_rcv_nxt = tp->rcv_nxt;
if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
tp->compressed_ack - TCP_FASTRETRANS_THRESH);
tp->compressed_ack = 0;
}
if (++tp->compressed_ack <= TCP_FASTRETRANS_THRESH)
goto send_now;
if (hrtimer_is_queued(&tp->compressed_ack_timer))
return;

@ -180,10 +180,10 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
{
struct tcp_sock *tp = tcp_sk(sk);
if (unlikely(tp->compressed_ack)) {
if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) {
NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
tp->compressed_ack);
tp->compressed_ack = 0;
tp->compressed_ack - TCP_FASTRETRANS_THRESH);
tp->compressed_ack = TCP_FASTRETRANS_THRESH;
if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
__sock_put(sk);
}
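
A hedged reading of the compressed-ACK hunks above: tp->compressed_ack
now counts every ACK for the current rcv_nxt (resetting when rcv_nxt
moves), the first TCP_FASTRETRANS_THRESH (3) of them are sent
immediately so legacy stacks still observe three duplicate ACKs and
trigger fast retransmit, and only later ones are compressed. A toy
model of that gate:

    #include <stdio.h>

    #define TCP_FASTRETRANS_THRESH 3

    static unsigned int compressed_ack; /* reset when rcv_nxt advances */

    static int ack_should_be_sent_now(void)
    {
        return ++compressed_ack <= TCP_FASTRETRANS_THRESH;
    }

    int main(void)
    {
        /* acks 1..3 print "send now", 4..5 print "compress" */
        for (int i = 1; i <= 5; i++)
            printf("ack %d: %s\n", i,
                   ack_should_be_sent_now() ? "send now" : "compress");
        return 0;
    }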

@ -740,7 +740,7 @@ static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
bh_lock_sock(sk);
if (!sock_owned_by_user(sk)) {
if (tp->compressed_ack)
if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
tcp_send_ack(sk);
} else {
if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,

@ -179,7 +179,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp);
static void addrconf_dad_work(struct work_struct *w);
static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
bool send_na);
static void addrconf_dad_run(struct inet6_dev *idev);
static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
static void addrconf_rs_timer(struct timer_list *t);
static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
@ -3439,6 +3439,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_change_info *change_info;
struct netdev_notifier_changeupper_info *info;
struct inet6_dev *idev = __in6_dev_get(dev);
struct net *net = dev_net(dev);
@ -3513,7 +3514,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
break;
}
if (idev) {
if (!IS_ERR_OR_NULL(idev)) {
if (idev->if_flags & IF_READY) {
/* device is already configured -
* but resend MLD reports, we might
@ -3521,6 +3522,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
* multicast snooping switches
*/
ipv6_mc_up(idev);
change_info = ptr;
if (change_info->flags_changed & IFF_NOARP)
addrconf_dad_run(idev, true);
rt6_sync_up(dev, RTNH_F_LINKDOWN);
break;
}
@ -3555,7 +3559,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
if (!IS_ERR_OR_NULL(idev)) {
if (run_pending)
addrconf_dad_run(idev);
addrconf_dad_run(idev, false);
/* Device has an address by now */
rt6_sync_up(dev, RTNH_F_DEAD);
@ -4173,16 +4177,19 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
addrconf_verify_rtnl();
}
static void addrconf_dad_run(struct inet6_dev *idev)
static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
{
struct inet6_ifaddr *ifp;
read_lock_bh(&idev->lock);
list_for_each_entry(ifp, &idev->addr_list, if_list) {
spin_lock(&ifp->lock);
if (ifp->flags & IFA_F_TENTATIVE &&
ifp->state == INET6_IFADDR_STATE_DAD)
if ((ifp->flags & IFA_F_TENTATIVE &&
ifp->state == INET6_IFADDR_STATE_DAD) || restart) {
if (restart)
ifp->state = INET6_IFADDR_STATE_PREDAD;
addrconf_dad_kick(ifp);
}
spin_unlock(&ifp->lock);
}
read_unlock_bh(&idev->lock);

@ -2394,7 +2394,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
void *ph;
__u32 ts;
ph = skb_shinfo(skb)->destructor_arg;
ph = skb_zcopy_get_nouarg(skb);
packet_dec_pending(&po->tx_ring);
ts = __packet_set_timestamp(po, ph, skb);
@ -2461,7 +2461,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
skb->mark = po->sk.sk_mark;
skb->tstamp = sockc->transmit_time;
sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
skb_shinfo(skb)->destructor_arg = ph.raw;
skb_zcopy_set_nouarg(skb, ph.raw);
skb_reserve(skb, hlen);
skb_reset_network_header(skb);

@ -27,10 +27,7 @@ struct tcf_police_params {
u32 tcfp_ewma_rate;
s64 tcfp_burst;
u32 tcfp_mtu;
s64 tcfp_toks;
s64 tcfp_ptoks;
s64 tcfp_mtu_ptoks;
s64 tcfp_t_c;
struct psched_ratecfg rate;
bool rate_present;
struct psched_ratecfg peak;
@ -41,6 +38,11 @@ struct tcf_police_params {
struct tcf_police {
struct tc_action common;
struct tcf_police_params __rcu *params;
spinlock_t tcfp_lock ____cacheline_aligned_in_smp;
s64 tcfp_toks;
s64 tcfp_ptoks;
s64 tcfp_t_c;
};
#define to_police(pc) ((struct tcf_police *)pc)
@ -122,6 +124,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
return ret;
}
ret = ACT_P_CREATED;
spin_lock_init(&(to_police(*a)->tcfp_lock));
} else if (!ovr) {
tcf_idr_release(*a, bind);
return -EEXIST;
@ -186,12 +189,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
}
new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
new->tcfp_toks = new->tcfp_burst;
if (new->peak_present) {
if (new->peak_present)
new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
new->tcfp_mtu);
new->tcfp_ptoks = new->tcfp_mtu_ptoks;
}
if (tb[TCA_POLICE_AVRATE])
new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
@ -207,7 +207,12 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
}
spin_lock_bh(&police->tcf_lock);
new->tcfp_t_c = ktime_get_ns();
spin_lock_bh(&police->tcfp_lock);
police->tcfp_t_c = ktime_get_ns();
police->tcfp_toks = new->tcfp_burst;
if (new->peak_present)
police->tcfp_ptoks = new->tcfp_mtu_ptoks;
spin_unlock_bh(&police->tcfp_lock);
police->tcf_action = parm->action;
rcu_swap_protected(police->params,
new,
@ -257,25 +262,28 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
}
now = ktime_get_ns();
toks = min_t(s64, now - p->tcfp_t_c, p->tcfp_burst);
spin_lock_bh(&police->tcfp_lock);
toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
if (p->peak_present) {
ptoks = toks + p->tcfp_ptoks;
ptoks = toks + police->tcfp_ptoks;
if (ptoks > p->tcfp_mtu_ptoks)
ptoks = p->tcfp_mtu_ptoks;
ptoks -= (s64)psched_l2t_ns(&p->peak,
qdisc_pkt_len(skb));
}
toks += p->tcfp_toks;
toks += police->tcfp_toks;
if (toks > p->tcfp_burst)
toks = p->tcfp_burst;
toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
if ((toks|ptoks) >= 0) {
p->tcfp_t_c = now;
p->tcfp_toks = toks;
p->tcfp_ptoks = ptoks;
police->tcfp_t_c = now;
police->tcfp_toks = toks;
police->tcfp_ptoks = ptoks;
spin_unlock_bh(&police->tcfp_lock);
ret = p->tcfp_result;
goto inc_drops;
}
spin_unlock_bh(&police->tcfp_lock);
}
inc_overlimits:

@ -118,6 +118,9 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
sctp_transport_route(tp, NULL, sp);
if (asoc->param_flags & SPP_PMTUD_ENABLE)
sctp_assoc_sync_pmtu(asoc);
} else if (!sctp_transport_pmtu_check(tp)) {
if (asoc->param_flags & SPP_PMTUD_ENABLE)
sctp_assoc_sync_pmtu(asoc);
}
if (asoc->pmtu_pending) {
@ -396,25 +399,6 @@ finish:
return retval;
}
static void sctp_packet_release_owner(struct sk_buff *skb)
{
sk_free(skb->sk);
}
static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
skb_orphan(skb);
skb->sk = sk;
skb->destructor = sctp_packet_release_owner;
/*
* The data chunks have already been accounted for in sctp_sendmsg(),
* therefore only reserve a single byte to keep socket around until
* the packet has been transmitted.
*/
refcount_inc(&sk->sk_wmem_alloc);
}
static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
{
if (SCTP_OUTPUT_CB(head)->last == head)
@ -601,7 +585,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
if (!head)
goto out;
skb_reserve(head, packet->overhead + MAX_HEADER);
sctp_packet_set_owner_w(head, sk);
skb_set_owner_w(head, sk);
/* set sctp header */
sh = skb_push(head, sizeof(struct sctphdr));

@ -3940,32 +3940,16 @@ static int sctp_setsockopt_pr_supported(struct sock *sk,
unsigned int optlen)
{
struct sctp_assoc_value params;
struct sctp_association *asoc;
int retval = -EINVAL;
if (optlen != sizeof(params))
goto out;
return -EINVAL;
if (copy_from_user(&params, optval, optlen)) {
retval = -EFAULT;
goto out;
}
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
asoc = sctp_id2assoc(sk, params.assoc_id);
if (asoc) {
asoc->prsctp_enable = !!params.assoc_value;
} else if (!params.assoc_id) {
struct sctp_sock *sp = sctp_sk(sk);
sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;
sp->ep->prsctp_enable = !!params.assoc_value;
} else {
goto out;
}
retval = 0;
out:
return retval;
return 0;
}
static int sctp_setsockopt_default_prinfo(struct sock *sk,

@@ -535,7 +535,6 @@ int sctp_send_add_streams(struct sctp_association *asoc,
goto out;
}
stream->incnt = incnt;
stream->outcnt = outcnt;
asoc->strreset_outstanding = !!out + !!in;

@@ -127,6 +127,8 @@ static int smc_release(struct socket *sock)
smc = smc_sk(sk);
/* cleanup for a dangling non-blocking connect */
if (smc->connect_info && sk->sk_state == SMC_INIT)
tcp_abort(smc->clcsock->sk, ECONNABORTED);
flush_work(&smc->connect_work);
kfree(smc->connect_info);
smc->connect_info = NULL;
@@ -547,7 +549,8 @@ static int smc_connect_rdma(struct smc_sock *smc,
mutex_lock(&smc_create_lgr_pending);
local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev,
ibport, &aclc->lcl, NULL, 0);
ibport, ntoh24(aclc->qpn), &aclc->lcl,
NULL, 0);
if (local_contact < 0) {
if (local_contact == -ENOMEM)
reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
@@ -618,7 +621,7 @@ static int smc_connect_ism(struct smc_sock *smc,
int rc = 0;
mutex_lock(&smc_create_lgr_pending);
local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0,
local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, 0,
NULL, ismdev, aclc->gid);
if (local_contact < 0)
return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0);
@@ -1083,7 +1086,7 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
int *local_contact)
{
/* allocate connection / link group */
*local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport,
*local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, 0,
&pclc->lcl, NULL, 0);
if (*local_contact < 0) {
if (*local_contact == -ENOMEM)
@@ -1107,7 +1110,7 @@ static int smc_listen_ism_init(struct smc_sock *new_smc,
struct smc_clc_msg_smcd *pclc_smcd;
pclc_smcd = smc_get_clc_msg_smcd(pclc);
*local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, NULL,
*local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, 0, NULL,
ismdev, pclc_smcd->gid);
if (*local_contact < 0) {
if (*local_contact == -ENOMEM)

@@ -81,7 +81,7 @@ static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE,
"must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)");
BUILD_BUG_ON_MSG(
sizeof(struct smc_cdc_msg) != SMC_WR_TX_SIZE,
offsetofend(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE,
"must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
BUILD_BUG_ON_MSG(
sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
@@ -177,23 +177,24 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
int smcd_cdc_msg_send(struct smc_connection *conn)
{
struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
union smc_host_cursor curs;
struct smcd_cdc_msg cdc;
int rc, diff;
memset(&cdc, 0, sizeof(cdc));
cdc.common.type = SMC_CDC_MSG_TYPE;
cdc.prod_wrap = conn->local_tx_ctrl.prod.wrap;
cdc.prod_count = conn->local_tx_ctrl.prod.count;
cdc.cons_wrap = conn->local_tx_ctrl.cons.wrap;
cdc.cons_count = conn->local_tx_ctrl.cons.count;
cdc.prod_flags = conn->local_tx_ctrl.prod_flags;
cdc.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs);
cdc.prod.wrap = curs.wrap;
cdc.prod.count = curs.count;
curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs);
cdc.cons.wrap = curs.wrap;
cdc.cons.count = curs.count;
cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags;
cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
if (rc)
return rc;
smc_curs_copy(&conn->rx_curs_confirmed, &conn->local_tx_ctrl.cons,
conn);
smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
/* Calculate transmitted data and increment free send buffer space */
diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
&conn->tx_curs_sent);
@@ -331,13 +332,16 @@ static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
static void smcd_cdc_rx_tsklet(unsigned long data)
{
struct smc_connection *conn = (struct smc_connection *)data;
struct smcd_cdc_msg *data_cdc;
struct smcd_cdc_msg cdc;
struct smc_sock *smc;
if (!conn)
return;
memcpy(&cdc, conn->rmb_desc->cpu_addr, sizeof(cdc));
data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr;
smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn);
smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn);
smc = container_of(conn, struct smc_sock, conn);
smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc);
}

@@ -48,21 +48,31 @@ struct smc_cdc_msg {
struct smc_cdc_producer_flags prod_flags;
struct smc_cdc_conn_state_flags conn_state_flags;
u8 reserved[18];
} __packed; /* format defined in RFC7609 */
};
/* SMC-D cursor format */
union smcd_cdc_cursor {
struct {
u16 wrap;
u32 count;
struct smc_cdc_producer_flags prod_flags;
struct smc_cdc_conn_state_flags conn_state_flags;
} __packed;
#ifdef KERNEL_HAS_ATOMIC64
atomic64_t acurs; /* for atomic processing */
#else
u64 acurs; /* for atomic processing */
#endif
} __aligned(8);
/* CDC message for SMC-D */
struct smcd_cdc_msg {
struct smc_wr_rx_hdr common; /* Type = 0xFE */
u8 res1[7];
u16 prod_wrap;
u32 prod_count;
u8 res2[2];
u16 cons_wrap;
u32 cons_count;
struct smc_cdc_producer_flags prod_flags;
struct smc_cdc_conn_state_flags conn_state_flags;
union smcd_cdc_cursor prod;
union smcd_cdc_cursor cons;
u8 res3[8];
} __packed;
} __aligned(8);
static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)
{
@@ -135,6 +145,21 @@ static inline void smc_curs_copy_net(union smc_cdc_cursor *tgt,
#endif
}
static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt,
union smcd_cdc_cursor *src,
struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
unsigned long flags;
spin_lock_irqsave(&conn->acurs_lock, flags);
tgt->acurs = src->acurs;
spin_unlock_irqrestore(&conn->acurs_lock, flags);
#else
atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
#endif
}
/* calculate cursor difference between old and new, where old <= new */
static inline int smc_curs_diff(unsigned int size,
union smc_host_cursor *old,
@@ -222,12 +247,17 @@ static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local,
static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local,
struct smcd_cdc_msg *peer)
{
local->prod.wrap = peer->prod_wrap;
local->prod.count = peer->prod_count;
local->cons.wrap = peer->cons_wrap;
local->cons.count = peer->cons_count;
local->prod_flags = peer->prod_flags;
local->conn_state_flags = peer->conn_state_flags;
union smc_host_cursor temp;
temp.wrap = peer->prod.wrap;
temp.count = peer->prod.count;
atomic64_set(&local->prod.acurs, atomic64_read(&temp.acurs));
temp.wrap = peer->cons.wrap;
temp.count = peer->cons.count;
atomic64_set(&local->cons.acurs, atomic64_read(&temp.acurs));
local->prod_flags = peer->cons.prod_flags;
local->conn_state_flags = peer->cons.conn_state_flags;
}
static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
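
The header rework above hinges on one trick: overlaying the logical cursor fields with a single 64-bit word, so a producer/consumer position is always read or written in one atomic operation rather than field by field. A userspace sketch of the same idea (C11 atomics, assumed layout, not the kernel types):

/*
 * Sketch only: wrap + count share storage with one 64-bit atomic, so a
 * reader can never observe a wrap from one update paired with a count
 * from another (no torn cursors).
 */
#include <stdatomic.h>
#include <stdint.h>

union cursor {
	struct {
		uint16_t wrap;   /* how often the ring buffer wrapped */
		uint32_t count;  /* offset within the buffer */
	};
	_Atomic uint64_t acurs;  /* the whole cursor as one word */
};

static void cursor_copy(union cursor *dst, union cursor *src)
{
	/* single load + single store instead of two field copies */
	atomic_store(&dst->acurs, atomic_load(&src->acurs));
}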

@@ -184,6 +184,8 @@ free:
if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
smc_llc_link_inactive(lnk);
if (lgr->is_smcd)
smc_ism_signal_shutdown(lgr);
smc_lgr_free(lgr);
}
}
@@ -485,7 +487,7 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
}
/* Called when SMC-D device is terminated or peer is lost */
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
{
struct smc_link_group *lgr, *l;
LIST_HEAD(lgr_free_list);
@@ -495,7 +497,7 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
if (lgr->is_smcd && lgr->smcd == dev &&
(!peer_gid || lgr->peer_gid == peer_gid) &&
!list_empty(&lgr->list)) {
(vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
__smc_lgr_terminate(lgr);
list_move(&lgr->list, &lgr_free_list);
}
@@ -506,6 +508,8 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
list_del_init(&lgr->list);
cancel_delayed_work_sync(&lgr->free_work);
if (!peer_gid && vlan == VLAN_VID_MASK) /* dev terminated? */
smc_ism_signal_shutdown(lgr);
smc_lgr_free(lgr);
}
}
@@ -559,7 +563,7 @@ out:
static bool smcr_lgr_match(struct smc_link_group *lgr,
struct smc_clc_msg_local *lcl,
enum smc_lgr_role role)
enum smc_lgr_role role, u32 clcqpn)
{
return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
SMC_SYSTEMID_LEN) &&
@@ -567,7 +571,9 @@ static bool smcr_lgr_match(struct smc_link_group *lgr,
SMC_GID_SIZE) &&
!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
sizeof(lcl->mac)) &&
lgr->role == role;
lgr->role == role &&
(lgr->role == SMC_SERV ||
lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn);
}
static bool smcd_lgr_match(struct smc_link_group *lgr,
@@ -578,7 +584,7 @@ static bool smcd_lgr_match(struct smc_link_group *lgr,
/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
struct smc_ib_device *smcibdev, u8 ibport,
struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn,
struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
u64 peer_gid)
{
@@ -603,7 +609,7 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
list_for_each_entry(lgr, &smc_lgr_list.list, list) {
write_lock_bh(&lgr->conns_lock);
if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) :
smcr_lgr_match(lgr, lcl, role)) &&
smcr_lgr_match(lgr, lcl, role, clcqpn)) &&
!lgr->sync_err &&
lgr->vlan_id == vlan_id &&
(role == SMC_CLNT ||
@@ -1024,6 +1030,8 @@ void smc_core_exit(void)
smc_llc_link_inactive(lnk);
}
cancel_delayed_work_sync(&lgr->free_work);
if (lgr->is_smcd)
smc_ism_signal_shutdown(lgr);
smc_lgr_free(lgr); /* free link group */
}
}
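
The clcqpn additions above tighten link-group reuse on the client side: an existing RDMA link group only matches if its single link already points at the queue pair the server announced in the CLC accept. A sketch of the predicate (assumed, simplified fields):

/*
 * Sketch only: clients additionally require the existing link's peer QP
 * number to equal the one from the server's accept, so a stale group is
 * not reused for a connection the server set up on a different QP.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

enum role { ROLE_CLNT, ROLE_SERV };

struct link { uint32_t peer_qpn; uint8_t peer_gid[16]; };
struct lgr  { enum role role; struct link lnk; };

static bool lgr_match(const struct lgr *lgr, const uint8_t *gid,
		      enum role role, uint32_t clcqpn)
{
	return !memcmp(lgr->lnk.peer_gid, gid, 16) &&
	       lgr->role == role &&
	       (lgr->role == ROLE_SERV || lgr->lnk.peer_qpn == clcqpn);
}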

@@ -247,7 +247,8 @@ void smc_lgr_free(struct smc_link_group *lgr);
void smc_lgr_forget(struct smc_link_group *lgr);
void smc_lgr_terminate(struct smc_link_group *lgr);
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid);
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
unsigned short vlan);
int smc_buf_create(struct smc_sock *smc, bool is_smcd);
int smc_uncompress_bufsize(u8 compressed);
int smc_rmb_rtoken_handling(struct smc_connection *conn,
@@ -262,7 +263,7 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id);
void smc_conn_free(struct smc_connection *conn);
int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
struct smc_ib_device *smcibdev, u8 ibport,
struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn,
struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
u64 peer_gid);
void smcd_conn_free(struct smc_connection *conn);

@@ -187,22 +187,28 @@ struct smc_ism_event_work {
#define ISM_EVENT_REQUEST 0x0001
#define ISM_EVENT_RESPONSE 0x0002
#define ISM_EVENT_REQUEST_IR 0x00000001
#define ISM_EVENT_CODE_SHUTDOWN 0x80
#define ISM_EVENT_CODE_TESTLINK 0x83
union smcd_sw_event_info {
u64 info;
struct {
u8 uid[SMC_LGR_ID_SIZE];
unsigned short vlan_id;
u16 code;
};
};
static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
{
union {
u64 info;
struct {
u32 uid;
unsigned short vlanid;
u16 code;
};
} ev_info;
union smcd_sw_event_info ev_info;
switch (wrk->event.code) {
case ISM_EVENT_CODE_TESTLINK: /* Activity timer */
ev_info.info = wrk->event.info;
switch (wrk->event.code) {
case ISM_EVENT_CODE_SHUTDOWN: /* Peer shut down DMBs */
smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id);
break;
case ISM_EVENT_CODE_TESTLINK: /* Activity timer */
if (ev_info.code == ISM_EVENT_REQUEST) {
ev_info.code = ISM_EVENT_RESPONSE;
wrk->smcd->ops->signal_event(wrk->smcd,
@@ -215,6 +221,21 @@ static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
}
}
int smc_ism_signal_shutdown(struct smc_link_group *lgr)
{
int rc;
union smcd_sw_event_info ev_info;
memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
ev_info.vlan_id = lgr->vlan_id;
ev_info.code = ISM_EVENT_REQUEST;
rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid,
ISM_EVENT_REQUEST_IR,
ISM_EVENT_CODE_SHUTDOWN,
ev_info.info);
return rc;
}
/* worker for SMC-D events */
static void smc_ism_event_work(struct work_struct *work)
{
@@ -223,7 +244,7 @@ static void smc_ism_event_work(struct work_struct *work)
switch (wrk->event.type) {
case ISM_EVENT_GID: /* GID event, token is peer GID */
smc_smcd_terminate(wrk->smcd, wrk->event.tok);
smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK);
break;
case ISM_EVENT_DMB:
break;
@@ -289,7 +310,7 @@ void smcd_unregister_dev(struct smcd_dev *smcd)
spin_unlock(&smcd_dev_list.lock);
flush_workqueue(smcd->event_wq);
destroy_workqueue(smcd->event_wq);
smc_smcd_terminate(smcd, 0);
smc_smcd_terminate(smcd, 0, VLAN_VID_MASK);
device_del(&smcd->dev);
}
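
Naming the previously anonymous union (smcd_sw_event_info) lets the shutdown sender and the software-event handler marshal the same 64-bit event-info word one way. A sketch of the encoding (assumed sizes and host byte order):

/*
 * Sketch only: one u64 carried by the event transport, overlaid with the
 * fields the SW-event handlers care about (4 + 2 + 2 = 8 bytes).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

union event_info {
	uint64_t info;                 /* as carried in the event */
	struct {
		uint8_t  uid[4];       /* link group id */
		uint16_t vlan_id;      /* VLAN of the link group */
		uint16_t code;         /* request/response code */
	};
};

int main(void)
{
	union event_info ev = { 0 };

	memcpy(ev.uid, "lgr0", 4);
	ev.vlan_id = 7;
	ev.code = 0x0001;              /* e.g. a request code */
	printf("info word: 0x%016llx\n", (unsigned long long)ev.info);
	return 0;
}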

@@ -45,4 +45,5 @@ int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size,
int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc);
int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos,
void *data, size_t len);
int smc_ism_signal_shutdown(struct smc_link_group *lgr);
#endif

@@ -215,12 +215,14 @@ int smc_wr_tx_put_slot(struct smc_link *link,
pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv);
if (pend->idx < link->wr_tx_cnt) {
u32 idx = pend->idx;
/* clear the full struct smc_wr_tx_pend including .priv */
memset(&link->wr_tx_pends[pend->idx], 0,
sizeof(link->wr_tx_pends[pend->idx]));
memset(&link->wr_tx_bufs[pend->idx], 0,
sizeof(link->wr_tx_bufs[pend->idx]));
test_and_clear_bit(pend->idx, link->wr_tx_mask);
test_and_clear_bit(idx, link->wr_tx_mask);
return 1;
}
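
The final hunk is a classic order-of-operations fix: the index used to clear the busy bit lives inside the structure that gets zeroed, so it must be copied out before the memset. Minimal sketch (assumed types; assumes idx < BITS_PER_LONG for brevity):

/*
 * Sketch only: pend points into the array being wiped, so pend->idx is
 * gone after the memset; clearing the bit must use the saved copy.
 */
#include <stdint.h>
#include <string.h>

struct tx_pend { uint32_t idx; char priv[16]; };

static void put_slot(struct tx_pend *pends, unsigned long *mask,
		     struct tx_pend *pend)
{
	uint32_t idx = pend->idx;                    /* save before zeroing */

	memset(&pends[idx], 0, sizeof(pends[idx]));  /* wipes pend->idx too */
	*mask &= ~(1UL << idx);                      /* use the saved copy */
}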