Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix handling of pinned BPF map nodes in hash of maps, from Daniel
    Borkmann.

 2) IPSEC ESP error paths leak memory, from Steffen Klassert.

 3) We need an RCU grace period before freeing fib6_node objects, from
    Wei Wang.

 4) Must check skb_put_padto() return value in HSR driver, from Florian
    Fainelli (a sketch of this calling pattern follows the list).

 5) Fix oops on PHY probe failure in ftgmac100 driver, from Andrew
    Jeffery.

 6) Fix infinite loop in UDP queue when using SO_PEEK_OFF, from Eric
    Dumazet.

 7) Use after free when tcf_chain_destroy() called multiple times, from
    Jiri Pirko.

 8) Fix KSZ DSA tag layer multiple free of SKBS, from Florian Fainelli.

 9) Fix leak of uninitialized memory in sctp_get_sctp_info(),
    inet_diag_msg_sctpladdrs_fill() and inet_diag_msg_sctpaddrs_fill().
    From Stefano Brivio.

10) L2TP tunnel refcount fixes from Guillaume Nault.

11) Don't leak UDP secpath in udp_set_dev_scratch(), from Yossi
    Kuperman.

12) Revert a PHY layer change wrt. handling of PHY_HALTED state in
    phy_stop_machine(); it causes regressions for multiple people. From
    Florian Fainelli.

13) When packets are sent out of br0 we have to clear the
    offload_fwd_mark value, from Ido Schimmel.

14) Several NULL pointer deref fixes in packet schedulers when their
    ->init() routine fails. From Nikolay Aleksandrov.

15) Aquantia devices cannot checksum offload correctly when the packet
    is <= 60 bytes. From Pavel Belous.

16) Fix vnet header access past end of buffer in AF_PACKET, from
    Benjamin Poirier.

17) Double free in probe error paths of nfp driver, from Dan Carpenter.

18) QOS capability not checked properly in DCB init paths of mlx5
    driver, from Huy Nguyen.

19) Fix conflicts between firmware load failure and health_care timer in
    mlx5, also from Huy Nguyen.

20) Fix dangling page pointer when DMA mapping errors occur in mlx5,
    from Eran Ben Elisha.

21) ->ndo_setup_tc() in bnxt_en driver doesn't count rings properly,
    from Michael Chan.

22) Missing MSIX vector free in bnxt_en, also from Michael Chan.

23) Refcount leak in xfrm layer when using sk_policy, from Lorenzo
    Colitti.

24) Fix copy of uninitialized data in qlge driver, from Arnd Bergmann.

25) bpf_setsockopt() erroneously always returns -EINVAL even on
    success. Fix from Yuchung Cheng.

26) tipc_rcv() needs to linearize the SKB before parsing the inner
    headers, from Parthasarathy Bhuvaragan.

27) Fix deadlock between link status updates and link removal in netvsc
    driver, from Stephen Hemminger.

28) Missed locking of page fragment handling in ESP output, from Steffen
    Klassert.

29) Fix refcnt leak in ebpf congestion control code, from Sabrina
    Dubroca.

30) sxgbe_probe_config_dt() doesn't check devm_kzalloc()'s return value,
    from Christophe Jaillet.

31) Fix missing ipv6 rx_dst_cookie update when rx_dst is updated during
    early demux, from Paolo Abeni.

32) Several info leaks in xfrm_user layer, from Mathias Krause.

33) Fix out of bounds read in cxgb4 driver, from Stefano Brivio.

34) Properly propagate obsolete state of route upwards in ipv6 so that
    upper holders like xfrm can see it. From Xin Long.

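For reference on item 4 above: here is a minimal, hypothetical sketch of
the skb_put_padto() calling pattern that fix enforces. foo_xmit and the
queueing step are placeholders rather than code from this pull; the key
point, which also motivates the __skb_put_padto()/free_on_error helper
added to skbuff.h later in this diff, is that skb_put_padto() frees the
skb on allocation failure, so the caller must not touch the buffer again
and should simply report the frame as handled.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Pad runt frames up to the minimum Ethernet size.  On failure
	 * skb_put_padto() has already freed the skb, so bail out without
	 * referencing it again.
	 */
	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* ... map the now-padded skb and hand it to the hardware ... */

	return NETDEV_TX_OK;
}

A caller that prefers to keep ownership of the skb on failure can use the
new three-argument form added in this pull, __skb_put_padto(skb, ETH_ZLEN,
false), and free the buffer itself.
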
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (118 commits)
  udp: fix secpath leak
  bridge: switchdev: Clear forward mark when transmitting packet
  mlxsw: spectrum: Forbid linking to devices that have uppers
  wl1251: add a missing spin_lock_init()
  Revert "net: phy: Correctly process PHY_HALTED in phy_stop_machine()"
  net: dsa: bcm_sf2: Fix number of CFP entries for BCM7278
  kcm: do not attach PF_KCM sockets to avoid deadlock
  sch_tbf: fix two null pointer dereferences on init failure
  sch_sfq: fix null pointer dereference on init failure
  sch_netem: avoid null pointer deref on init failure
  sch_fq_codel: avoid double free on init failure
  sch_cbq: fix null pointer dereferences on init failure
  sch_hfsc: fix null pointer deref and double free on init failure
  sch_hhf: fix null pointer dereference on init failure
  sch_multiq: fix double free on init failure
  sch_htb: fix crash on init failure
  net/mlx5e: Fix CQ moderation mode not set properly
  net/mlx5e: Fix inline header size for small packets
  net/mlx5: E-Switch, Unload the representors in the correct order
  net/mlx5e: Properly resolve TC offloaded ipv6 vxlan tunnel source address
  ...
commit 8cf9f2a29f
Author: Linus Torvalds
Date:   2017-09-01 12:49:03 -07:00

 119 files changed, 820 insertions(+), 563 deletions(-)

@ -1048,6 +1048,7 @@ struct bcm_sf2_of_data {
u32 type;
const u16 *reg_offsets;
unsigned int core_reg_align;
unsigned int num_cfp_rules;
};
/* Register offsets for the SWITCH_REG_* block */
@ -1071,6 +1072,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
.type = BCM7445_DEVICE_ID,
.core_reg_align = 0,
.reg_offsets = bcm_sf2_7445_reg_offsets,
.num_cfp_rules = 256,
};
static const u16 bcm_sf2_7278_reg_offsets[] = {
@ -1093,6 +1095,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
.type = BCM7278_DEVICE_ID,
.core_reg_align = 1,
.reg_offsets = bcm_sf2_7278_reg_offsets,
.num_cfp_rules = 128,
};
static const struct of_device_id bcm_sf2_of_match[] = {
@ -1149,6 +1152,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
priv->type = data->type;
priv->reg_offsets = data->reg_offsets;
priv->core_reg_align = data->core_reg_align;
priv->num_cfp_rules = data->num_cfp_rules;
/* Auto-detection using standard registers will not work, so
* provide an indication of what kind of device we are for


@ -72,6 +72,7 @@ struct bcm_sf2_priv {
u32 type;
const u16 *reg_offsets;
unsigned int core_reg_align;
unsigned int num_cfp_rules;
/* spinlock protecting access to the indirect registers */
spinlock_t indir_lock;


@ -98,7 +98,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
{
u32 reg;
WARN_ON(addr >= CFP_NUM_RULES);
WARN_ON(addr >= priv->num_cfp_rules);
reg = core_readl(priv, CORE_CFP_ACC);
reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
@ -109,7 +109,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
/* Entry #0 is reserved */
return CFP_NUM_RULES - 1;
return priv->num_cfp_rules - 1;
}
static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
@ -523,7 +523,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
if (!(reg & OP_STR_DONE))
break;
} while (index < CFP_NUM_RULES);
} while (index < priv->num_cfp_rules);
/* Put the TCAM size here */
nfc->data = bcm_sf2_cfp_rule_size(priv);
@ -544,7 +544,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
case ETHTOOL_GRXCLSRLCNT:
/* Subtract the default, unusable rule */
nfc->rule_cnt = bitmap_weight(priv->cfp.used,
CFP_NUM_RULES) - 1;
priv->num_cfp_rules) - 1;
/* We support specifying rule locations */
nfc->data |= RX_CLS_LOC_SPECIAL;
break;


@ -1661,21 +1661,21 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
return 0;
}
static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
{
int ret;
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
return 0;
return;
if (!IS_ENABLED(CONFIG_MDIO_XGENE))
return 0;
return;
ret = xgene_enet_phy_connect(pdata->ndev);
if (!ret)
pdata->mdio_driver = true;
return 0;
return;
}
static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
@ -1779,10 +1779,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
if (ret)
return ret;
ret = xgene_enet_check_phy_handle(pdata);
if (ret)
return ret;
xgene_enet_gpiod_get(pdata);
pdata->clk = devm_clk_get(&pdev->dev, NULL);
@ -2097,9 +2093,11 @@ static int xgene_enet_probe(struct platform_device *pdev)
goto err;
}
xgene_enet_check_phy_handle(pdata);
ret = xgene_enet_init_hw(pdata);
if (ret)
goto err;
goto err2;
link_state = pdata->mac_ops->link_state;
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
@ -2117,29 +2115,30 @@ static int xgene_enet_probe(struct platform_device *pdev)
spin_lock_init(&pdata->stats_lock);
ret = xgene_extd_stats_init(pdata);
if (ret)
goto err2;
goto err1;
xgene_enet_napi_add(pdata);
ret = register_netdev(ndev);
if (ret) {
netdev_err(ndev, "Failed to register netdev\n");
goto err2;
goto err1;
}
return 0;
err2:
err1:
/*
* If necessary, free_netdev() will call netif_napi_del() and undo
* the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
*/
xgene_enet_delete_desc_rings(pdata);
err2:
if (pdata->mdio_driver)
xgene_enet_phy_disconnect(pdata);
else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
xgene_enet_mdio_remove(pdata);
err1:
xgene_enet_delete_desc_rings(pdata);
err:
free_netdev(ndev);
return ret;


@ -105,8 +105,7 @@ struct aq_hw_ops {
int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
int (*hw_get_link_status)(struct aq_hw_s *self,
struct aq_hw_link_status_s *link_status);
int (*hw_get_link_status)(struct aq_hw_s *self);
int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed);


@ -103,6 +103,8 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
else
cfg->vecs = 1U;
cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);
if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
@ -123,33 +125,30 @@ static void aq_nic_service_timer_cb(unsigned long param)
struct net_device *ndev = aq_nic_get_ndev(self);
int err = 0;
unsigned int i = 0U;
struct aq_hw_link_status_s link_status;
struct aq_ring_stats_rx_s stats_rx;
struct aq_ring_stats_tx_s stats_tx;
if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
goto err_exit;
err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status);
err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
if (err < 0)
goto err_exit;
self->link_status = self->aq_hw->aq_link_status;
self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
self->aq_nic_cfg.is_interrupt_moderation);
self->aq_nic_cfg.is_interrupt_moderation);
if (memcmp(&link_status, &self->link_status, sizeof(link_status))) {
if (link_status.mbps) {
aq_utils_obj_set(&self->header.flags,
AQ_NIC_FLAG_STARTED);
aq_utils_obj_clear(&self->header.flags,
AQ_NIC_LINK_DOWN);
netif_carrier_on(self->ndev);
} else {
netif_carrier_off(self->ndev);
aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
}
self->link_status = link_status;
if (self->link_status.mbps) {
aq_utils_obj_set(&self->header.flags,
AQ_NIC_FLAG_STARTED);
aq_utils_obj_clear(&self->header.flags,
AQ_NIC_LINK_DOWN);
netif_carrier_on(self->ndev);
} else {
netif_carrier_off(self->ndev);
aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
}
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
@ -597,14 +596,11 @@ exit:
}
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
__releases(&ring->lock)
__acquires(&ring->lock)
{
struct aq_ring_s *ring = NULL;
unsigned int frags = 0U;
unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
unsigned int tc = 0U;
unsigned int trys = AQ_CFG_LOCK_TRYS;
int err = NETDEV_TX_OK;
bool is_nic_in_bad_state;
@ -628,36 +624,21 @@ __acquires(&ring->lock)
goto err_exit;
}
do {
if (spin_trylock(&ring->header.lock)) {
frags = aq_nic_map_skb(self, skb, ring);
frags = aq_nic_map_skb(self, skb, ring);
if (likely(frags)) {
err = self->aq_hw_ops.hw_ring_tx_xmit(
self->aq_hw,
ring, frags);
if (err >= 0) {
if (aq_ring_avail_dx(ring) <
AQ_CFG_SKB_FRAGS_MAX + 1)
aq_nic_ndev_queue_stop(
self,
ring->idx);
if (likely(frags)) {
err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
ring,
frags);
if (err >= 0) {
if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
aq_nic_ndev_queue_stop(self, ring->idx);
++ring->stats.tx.packets;
ring->stats.tx.bytes += skb->len;
}
} else {
err = NETDEV_TX_BUSY;
}
spin_unlock(&ring->header.lock);
break;
++ring->stats.tx.packets;
ring->stats.tx.bytes += skb->len;
}
} while (--trys);
if (!trys) {
} else {
err = NETDEV_TX_BUSY;
goto err_exit;
}
err_exit:
@ -688,11 +669,26 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
netdev_for_each_mc_addr(ha, ndev) {
ether_addr_copy(self->mc_list.ar[i++], ha->addr);
++self->mc_list.count;
if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
break;
}
return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) {
/* Number of filters is too big: atlantic does not support this.
* Force all multi filter to support this.
* With this we disable all UC filters and setup "all pass"
* multicast mask
*/
self->packet_filter |= IFF_ALLMULTI;
self->aq_hw->aq_nic_cfg->mc_list_count = 0;
return self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
self->packet_filter);
} else {
return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
self->mc_list.ar,
self->mc_list.count);
}
}
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)


@ -101,7 +101,6 @@ int aq_ring_init(struct aq_ring_s *self)
self->hw_head = 0;
self->sw_head = 0;
self->sw_tail = 0;
spin_lock_init(&self->header.lock);
return 0;
}


@ -17,7 +17,6 @@
#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_)
struct aq_obj_s {
spinlock_t lock; /* spinlock for nic/rings processing */
atomic_t flags;
};


@ -34,8 +34,6 @@ struct aq_vec_s {
#define AQ_VEC_RX_ID 1
static int aq_vec_poll(struct napi_struct *napi, int budget)
__releases(&self->lock)
__acquires(&self->lock)
{
struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
struct aq_ring_s *ring = NULL;
@ -47,7 +45,7 @@ __acquires(&self->lock)
if (!self) {
err = -EINVAL;
} else if (spin_trylock(&self->header.lock)) {
} else {
for (i = 0U, ring = self->ring[0];
self->tx_rings > i; ++i, ring = self->ring[i]) {
if (self->aq_hw_ops->hw_ring_tx_head_update) {
@ -105,11 +103,8 @@ __acquires(&self->lock)
self->aq_hw_ops->hw_irq_enable(self->aq_hw,
1U << self->aq_ring_param.vec_idx);
}
err_exit:
spin_unlock(&self->header.lock);
}
err_exit:
return work_done;
}
@ -185,8 +180,6 @@ int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
self->aq_hw_ops = aq_hw_ops;
self->aq_hw = aq_hw;
spin_lock_init(&self->header.lock);
for (i = 0U, ring = self->ring[0];
self->tx_rings > i; ++i, ring = self->ring[i]) {
err = aq_ring_init(&ring[AQ_VEC_TX_ID]);


@ -629,6 +629,12 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
else if (0x0U == (pkt_type & 0x1CU))
buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;
/* Checksum offload workaround for small packets */
if (rxd_wb->pkt_len <= 60) {
buff->is_ip_cso = 0U;
buff->is_cso_err = 0U;
}
}
is_err &= ~0x18U;


@ -645,6 +645,12 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
else if (0x0U == (pkt_type & 0x1CU))
buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
/* Checksum offload workaround for small packets */
if (rxd_wb->pkt_len <= 60) {
buff->is_ip_cso = 0U;
buff->is_cso_err = 0U;
}
}
is_err &= ~0x18U;


@ -141,6 +141,12 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
aq_hw_read_reg(self, 0x18U));
if (err < 0)
pr_err("%s: Bad FW version detected: expected=%x, actual=%x\n",
AQ_CFG_DRV_NAME,
aq_hw_caps->fw_ver_expected,
aq_hw_read_reg(self, 0x18U));
return err;
}
@ -313,11 +319,11 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
err_exit:;
}
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
struct aq_hw_link_status_s *link_status)
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
{
u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
struct aq_hw_link_status_s *link_status = &self->aq_link_status;
if (!link_speed_mask) {
link_status->mbps = 0U;


@ -180,8 +180,7 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
enum hal_atl_utils_fw_state_e state);
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
struct aq_hw_link_status_s *link_status);
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps,


@ -597,7 +597,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
dev_kfree_skb_any(cb->skb);
dev_consume_skb_any(cb->skb);
cb->skb = NULL;
dma_unmap_addr_set(cb, dma_addr, 0);
}
@ -1346,6 +1346,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
if (!ring->cbs) {
dma_free_coherent(kdev, sizeof(struct dma_desc),
ring->desc_cpu, ring->desc_dma);
netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
return -ENOMEM;
}


@ -4647,7 +4647,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->port_id = le16_to_cpu(resp->port_id);
bp->dev->dev_port = pf->port_id;
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
@ -4687,16 +4686,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
mutex_unlock(&bp->hwrm_cmd_lock);
if (is_valid_ether_addr(vf->mac_addr)) {
/* overwrite netdev dev_adr with admin VF MAC */
memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
} else {
eth_hw_addr_random(bp->dev);
rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
}
return rc;
#endif
}
@ -7152,6 +7141,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
netdev_reset_tc(dev);
}
bp->tx_nr_rings += bp->tx_nr_rings_xdp;
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
bp->num_stat_ctxs = bp->cp_nr_rings;
@ -7661,6 +7651,28 @@ void bnxt_restore_pf_fw_resources(struct bnxt *bp)
bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
}
static int bnxt_init_mac_addr(struct bnxt *bp)
{
int rc = 0;
if (BNXT_PF(bp)) {
memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
if (is_valid_ether_addr(vf->mac_addr)) {
/* overwrite netdev dev_adr with admin VF MAC */
memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
} else {
eth_hw_addr_random(bp->dev);
rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
}
#endif
}
return rc;
}
static void bnxt_parse_log_pcie_link(struct bnxt *bp)
{
enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
@ -7789,7 +7801,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = -1;
goto init_err_pci_clean;
}
rc = bnxt_init_mac_addr(bp);
if (rc) {
dev_err(&pdev->dev, "Unable to initialize mac address.\n");
rc = -EADDRNOTAVAIL;
goto init_err_pci_clean;
}
rc = bnxt_hwrm_queue_qportcfg(bp);
if (rc) {
netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",


@ -84,6 +84,8 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1);
if (ulp->msix_requested)
edev->en_ops->bnxt_free_msix(edev, ulp_id);
}
if (ulp->max_async_event_id)
bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);


@ -1360,7 +1360,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
if (skb) {
pkts_compl++;
bytes_compl += GENET_CB(skb)->bytes_sent;
dev_kfree_skb_any(skb);
dev_consume_skb_any(skb);
}
txbds_processed++;
@ -1875,7 +1875,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
cb = ring->cbs + i;
skb = bcmgenet_rx_refill(priv, cb);
if (skb)
dev_kfree_skb_any(skb);
dev_consume_skb_any(skb);
if (!cb->skb)
return -ENOMEM;
}
@ -1894,7 +1894,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
if (skb)
dev_kfree_skb_any(skb);
dev_consume_skb_any(skb);
}
}


@ -369,12 +369,12 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
list_del(&entry.list);
spin_unlock(&adap->mbox_lock);
ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
t4_record_mbox(adap, cmd, size, access, ret);
return ret;
}
/* Copy in the new mailbox command and send it on its way ... */
t4_record_mbox(adap, cmd, MBOX_LEN, access, 0);
t4_record_mbox(adap, cmd, size, access, 0);
for (i = 0; i < size; i += 8)
t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
@ -426,7 +426,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
}
ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
t4_record_mbox(adap, cmd, size, access, ret);
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
t4_report_fw_error(adap);


@ -1863,7 +1863,6 @@ err_setup_mdio:
err_ioremap:
release_resource(priv->res);
err_req_mem:
netif_napi_del(&priv->napi);
free_netdev(netdev);
err_alloc_etherdev:
return err;


@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
goto no_mem;
}
pdev->dev.of_node = node;
pdev->dev.parent = priv->dev;
set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
ret = platform_device_add_data(pdev, &data, sizeof(data));


@ -6504,7 +6504,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
struct resource *res;
const char *dt_mac_addr;
const char *mac_from;
char hw_mac_addr[ETH_ALEN];
char hw_mac_addr[ETH_ALEN] = {0};
u32 id;
int features;
int phy_mode;


@ -263,6 +263,7 @@ struct mlx5e_dcbx {
/* The only setting that cannot be read from FW */
u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
u8 cap;
};
#endif


@ -288,13 +288,8 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_dcbx *dcbx = &priv->dcbx;
u8 mode = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_VER_CEE;
if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
mode |= DCB_CAP_DCBX_HOST;
return mode;
return priv->dcbx.cap;
}
static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@ -312,6 +307,7 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
/* set dcbx to fw controlled */
if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
dcbx->cap &= ~DCB_CAP_DCBX_HOST;
return 0;
}
@ -324,6 +320,8 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
return 1;
dcbx->cap = mode;
return 0;
}
@ -628,9 +626,9 @@ static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
*cap = false;
break;
case DCB_CAP_ATTR_DCBX:
*cap = (DCB_CAP_DCBX_LLD_MANAGED |
DCB_CAP_DCBX_VER_CEE |
DCB_CAP_DCBX_STATIC);
*cap = priv->dcbx.cap |
DCB_CAP_DCBX_VER_CEE |
DCB_CAP_DCBX_VER_IEEE;
break;
default:
*cap = 0;
@ -754,8 +752,16 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
{
struct mlx5e_dcbx *dcbx = &priv->dcbx;
if (!MLX5_CAP_GEN(priv->mdev, qos))
return;
if (MLX5_CAP_GEN(priv->mdev, dcbx))
mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
DCB_CAP_DCBX_VER_IEEE;
if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
mlx5e_ets_init(priv);
}


@ -641,8 +641,10 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
new_channels.params = priv->channels.params;
new_channels.params.num_channels = count;
mlx5e_build_default_indir_rqt(priv->mdev, new_channels.params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, count);
if (!netif_is_rxfh_configured(priv->netdev))
mlx5e_build_default_indir_rqt(priv->mdev,
new_channels.params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, count);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;


@ -1969,6 +1969,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
}
mlx5e_build_common_cq_param(priv, param);
param->cq_period_mode = params->rx_cq_period_mode;
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,


@ -222,13 +222,13 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
if (unlikely(!page))
return -ENOMEM;
dma_info->page = page;
dma_info->addr = dma_map_page(rq->pdev, page, 0,
RQ_PAGE_SIZE(rq), rq->buff.map_dir);
if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
put_page(page);
return -ENOMEM;
}
dma_info->page = page;
return 0;
}


@ -1443,12 +1443,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int ret;
dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
ret = dst->error;
if (ret) {
dst_release(dst);
ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
fl6);
if (ret < 0)
return ret;
}
*out_ttl = ip6_dst_hoplimit(dst);


@ -128,10 +128,10 @@ static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
return mlx5e_skb_l2_header_offset(skb);
}
static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
struct sk_buff *skb)
static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
struct sk_buff *skb)
{
int hlen;
u16 hlen;
switch (mode) {
case MLX5_INLINE_MODE_NONE:
@ -140,19 +140,22 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
hlen = eth_get_headlen(skb->data, skb_headlen(skb));
if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
hlen += VLAN_HLEN;
return hlen;
break;
case MLX5_INLINE_MODE_IP:
/* When transport header is set to zero, it means no transport
* header. When transport header is set to 0xff's, it means
* transport header wasn't set.
*/
if (skb_transport_offset(skb))
return mlx5e_skb_l3_header_offset(skb);
if (skb_transport_offset(skb)) {
hlen = mlx5e_skb_l3_header_offset(skb);
break;
}
/* fall through */
case MLX5_INLINE_MODE_L2:
default:
return mlx5e_skb_l2_header_offset(skb);
hlen = mlx5e_skb_l2_header_offset(skb);
}
return min_t(u16, hlen, skb->len);
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,


@ -815,7 +815,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
struct mlx5_eswitch_rep *rep;
int vport;
for (vport = 0; vport < nvports; vport++) {
for (vport = nvports - 1; vport >= 0; vport--) {
rep = &esw->offloads.vport_reps[vport];
if (!rep->valid)
continue;


@ -1186,7 +1186,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
}
}
clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
out:
mutex_unlock(&dev->intf_state_mutex);
@ -1261,7 +1260,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_drain_health_recovery(dev);
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
__func__);
if (cleanup)
@ -1270,7 +1269,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
}
clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
if (mlx5_device_registered(dev))
mlx5_detach_device(dev);
@ -1565,8 +1563,6 @@ static void shutdown(struct pci_dev *pdev)
int err;
dev_info(&pdev->dev, "Shutdown was called\n");
/* Notify mlx5 clients that the kernel is being shut down */
set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
err = mlx5_try_fast_unload(dev);
if (err)
mlx5_unload_one(dev, priv, false);


@ -201,13 +201,13 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
/* arm_srq structs missing using identical xrc ones */
u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
srq_out, sizeof(srq_out));


@ -4139,6 +4139,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
return -EINVAL;
if (!info->linking)
break;
if (netdev_has_any_upper_dev(upper_dev))
return -EINVAL;
if (netif_is_lag_master(upper_dev) &&
!mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
info->upper_info))
@ -4258,6 +4260,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
upper_dev = info->upper_dev;
if (!netif_is_bridge_master(upper_dev))
return -EINVAL;
if (!info->linking)
break;
if (netdev_has_any_upper_dev(upper_dev))
return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;


@ -705,6 +705,7 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_port_mc_router)
{
struct mlxsw_sp_bridge_port *bridge_port;
int err;
if (switchdev_trans_ph_prepare(trans))
return 0;
@ -715,11 +716,17 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
if (!bridge_port->bridge_device->multicast_enabled)
return 0;
goto out;
return mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
MLXSW_SP_FLOOD_TYPE_MC,
is_port_mc_router);
err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
MLXSW_SP_FLOOD_TYPE_MC,
is_port_mc_router);
if (err)
return err;
out:
bridge_port->mrouter = is_port_mc_router;
return 0;
}
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,


@ -42,33 +42,29 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
struct tc_cls_flower_offload *flow, u8 key_type,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
struct flow_dissector_key_vlan *flow_vlan;
u16 tmp_tci;
memset(frame, 0, sizeof(struct nfp_flower_meta_two));
/* Populate the metadata frame. */
frame->nfp_flow_key_layer = key_type;
frame->mask_id = ~0;
if (mask_version) {
frame->tci = cpu_to_be16(~0);
return;
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
flow_vlan = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_VLAN,
target);
/* Populate the tci field. */
if (flow_vlan->vlan_id) {
tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
flow_vlan->vlan_priority) |
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
flow_vlan->vlan_id) |
NFP_FLOWER_MASK_VLAN_CFI;
frame->tci = cpu_to_be16(tmp_tci);
}
}
flow_vlan = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_VLAN,
flow->key);
/* Populate the tci field. */
if (!flow_vlan->vlan_id) {
tmp_tci = 0;
} else {
tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
flow_vlan->vlan_priority) |
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
flow_vlan->vlan_id) |
NFP_FLOWER_MASK_VLAN_CFI;
}
frame->tci = cpu_to_be16(tmp_tci);
}
static void
@ -99,17 +95,18 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
struct flow_dissector_key_eth_addrs *flow_mac;
flow_mac = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS,
target);
struct flow_dissector_key_eth_addrs *addr;
memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));
/* Populate mac frame. */
ether_addr_copy(frame->mac_dst, &flow_mac->dst[0]);
ether_addr_copy(frame->mac_src, &flow_mac->src[0]);
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
addr = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS,
target);
/* Populate mac frame. */
ether_addr_copy(frame->mac_dst, &addr->dst[0]);
ether_addr_copy(frame->mac_src, &addr->src[0]);
}
if (mask_version)
frame->mpls_lse = cpu_to_be32(~0);
@ -121,14 +118,17 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
struct flow_dissector_key_ports *flow_tp;
struct flow_dissector_key_ports *tp;
flow_tp = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_PORTS,
target);
memset(frame, 0, sizeof(struct nfp_flower_tp_ports));
frame->port_src = flow_tp->src;
frame->port_dst = flow_tp->dst;
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
tp = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_PORTS,
target);
frame->port_src = tp->src;
frame->port_dst = tp->dst;
}
}
static void
@ -137,25 +137,27 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
struct flow_dissector_key_ipv4_addrs *flow_ipv4;
struct flow_dissector_key_basic *flow_basic;
struct flow_dissector_key_ipv4_addrs *addr;
struct flow_dissector_key_basic *basic;
flow_ipv4 = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_IPV4_ADDRS,
target);
flow_basic = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_BASIC,
target);
/* Populate IPv4 frame. */
frame->reserved = 0;
frame->ipv4_src = flow_ipv4->src;
frame->ipv4_dst = flow_ipv4->dst;
frame->proto = flow_basic->ip_proto;
/* Wildcard TOS/TTL for now. */
frame->tos = 0;
frame->ttl = 0;
memset(frame, 0, sizeof(struct nfp_flower_ipv4));
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
addr = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_IPV4_ADDRS,
target);
frame->ipv4_src = addr->src;
frame->ipv4_dst = addr->dst;
}
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
basic = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_BASIC,
target);
frame->proto = basic->ip_proto;
}
}
static void
@ -164,26 +166,27 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
struct flow_dissector_key_ipv6_addrs *flow_ipv6;
struct flow_dissector_key_basic *flow_basic;
struct flow_dissector_key_ipv6_addrs *addr;
struct flow_dissector_key_basic *basic;
flow_ipv6 = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_IPV6_ADDRS,
target);
flow_basic = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_BASIC,
target);
/* Populate IPv6 frame. */
frame->reserved = 0;
frame->ipv6_src = flow_ipv6->src;
frame->ipv6_dst = flow_ipv6->dst;
frame->proto = flow_basic->ip_proto;
/* Wildcard LABEL/TOS/TTL for now. */
frame->ipv6_flow_label_exthdr = 0;
frame->tos = 0;
frame->ttl = 0;
memset(frame, 0, sizeof(struct nfp_flower_ipv6));
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
addr = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_IPV6_ADDRS,
target);
frame->ipv6_src = addr->src;
frame->ipv6_dst = addr->dst;
}
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
basic = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_BASIC,
target);
frame->proto = basic->ip_proto;
}
}
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,


@ -105,43 +105,62 @@ static int
nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
struct tc_cls_flower_offload *flow)
{
struct flow_dissector_key_control *mask_enc_ctl;
struct flow_dissector_key_basic *mask_basic;
struct flow_dissector_key_basic *key_basic;
struct flow_dissector_key_basic *mask_basic = NULL;
struct flow_dissector_key_basic *key_basic = NULL;
struct flow_dissector_key_ip *mask_ip = NULL;
u32 key_layer_two;
u8 key_layer;
int key_size;
mask_enc_ctl = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
flow->mask);
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
struct flow_dissector_key_control *mask_enc_ctl =
skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
flow->mask);
/* We are expecting a tunnel. For now we ignore offloading. */
if (mask_enc_ctl->addr_type)
return -EOPNOTSUPP;
}
mask_basic = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_BASIC,
flow->mask);
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
mask_basic = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_BASIC,
flow->mask);
key_basic = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_BASIC,
flow->key);
}
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP))
mask_ip = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_IP,
flow->mask);
key_basic = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_BASIC,
flow->key);
key_layer_two = 0;
key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
key_size = sizeof(struct nfp_flower_meta_one) +
sizeof(struct nfp_flower_in_port) +
sizeof(struct nfp_flower_mac_mpls);
/* We are expecting a tunnel. For now we ignore offloading. */
if (mask_enc_ctl->addr_type)
return -EOPNOTSUPP;
if (mask_basic->n_proto) {
if (mask_basic && mask_basic->n_proto) {
/* Ethernet type is present in the key. */
switch (key_basic->n_proto) {
case cpu_to_be16(ETH_P_IP):
if (mask_ip && mask_ip->tos)
return -EOPNOTSUPP;
if (mask_ip && mask_ip->ttl)
return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV4;
key_size += sizeof(struct nfp_flower_ipv4);
break;
case cpu_to_be16(ETH_P_IPV6):
if (mask_ip && mask_ip->tos)
return -EOPNOTSUPP;
if (mask_ip && mask_ip->ttl)
return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV6;
key_size += sizeof(struct nfp_flower_ipv6);
break;
@ -152,6 +171,11 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
case cpu_to_be16(ETH_P_ARP):
return -EOPNOTSUPP;
/* Currently we do not offload MPLS. */
case cpu_to_be16(ETH_P_MPLS_UC):
case cpu_to_be16(ETH_P_MPLS_MC):
return -EOPNOTSUPP;
/* Will be included in layer 2. */
case cpu_to_be16(ETH_P_8021Q):
break;
@ -166,7 +190,7 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
}
}
if (mask_basic->ip_proto) {
if (mask_basic && mask_basic->ip_proto) {
/* Ethernet type is present in the key. */
switch (key_basic->ip_proto) {
case IPPROTO_TCP:


@ -98,21 +98,20 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
struct nfp_pf *pf = pci_get_drvdata(pdev);
int err;
mutex_lock(&pf->lock);
if (num_vfs > pf->limit_vfs) {
nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n",
pf->limit_vfs);
err = -EINVAL;
goto err_unlock;
return -EINVAL;
}
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err);
goto err_unlock;
return err;
}
mutex_lock(&pf->lock);
err = nfp_app_sriov_enable(pf->app, num_vfs);
if (err) {
dev_warn(&pdev->dev,
@ -129,9 +128,8 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
return num_vfs;
err_sriov_disable:
pci_disable_sriov(pdev);
err_unlock:
mutex_unlock(&pf->lock);
pci_disable_sriov(pdev);
return err;
#endif
return 0;
@ -158,10 +156,10 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
pf->num_vfs = 0;
mutex_unlock(&pf->lock);
pci_disable_sriov(pdev);
dev_dbg(&pdev->dev, "Removed VFs.\n");
mutex_unlock(&pf->lock);
#endif
return 0;
}


@ -895,6 +895,8 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
netdev_tx_sent_queue(nd_q, txbuf->real_len);
skb_tx_timestamp(skb);
tx_ring->wr_p += nr_frags + 1;
if (nfp_net_tx_ring_should_stop(tx_ring))
nfp_net_tx_ring_stop(nd_q, tx_ring);
@ -903,8 +905,6 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
if (!skb->xmit_more || netif_xmit_stopped(nd_q))
nfp_net_tx_xmit_more_flush(tx_ring);
skb_tx_timestamp(skb);
return NETDEV_TX_OK;
err_unmap:
@ -1751,6 +1751,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
continue;
}
nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
if (likely(!meta.portid)) {
netdev = dp->netdev;
} else {
@ -1759,16 +1763,12 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nn = netdev_priv(dp->netdev);
netdev = nfp_app_repr_get(nn->app, meta.portid);
if (unlikely(!netdev)) {
nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
continue;
}
nfp_repr_inc_rx_stats(netdev, pkt_len);
}
nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
skb_reserve(skb, pkt_off);
skb_put(skb, pkt_len);


@ -456,13 +456,9 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
int err;
err = nfp_net_pf_app_start_ctrl(pf);
if (err)
return err;
err = nfp_app_start(pf->app, pf->ctrl_vnic);
if (err)
goto err_ctrl_stop;
return err;
if (pf->num_vfs) {
err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
@ -474,8 +470,6 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf)
err_app_stop:
nfp_app_stop(pf->app);
err_ctrl_stop:
nfp_net_pf_app_stop_ctrl(pf);
return err;
}
@ -484,7 +478,6 @@ static void nfp_net_pf_app_stop(struct nfp_pf *pf)
if (pf->num_vfs)
nfp_app_sriov_disable(pf->app);
nfp_app_stop(pf->app);
nfp_net_pf_app_stop_ctrl(pf);
}
static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
@ -559,7 +552,7 @@ err_unmap_ctrl:
static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
{
nfp_net_pf_app_stop(pf);
nfp_net_pf_app_stop_ctrl(pf);
/* stop app first, to avoid double free of ctrl vNIC's ddir */
nfp_net_debugfs_dir_clean(&pf->ddir);
@ -690,6 +683,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
{
struct nfp_net_fw_version fw_ver;
u8 __iomem *ctrl_bar, *qc_bar;
struct nfp_net *nn;
int stride;
int err;
@ -766,7 +760,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_free_vnics;
err = nfp_net_pf_app_start(pf);
err = nfp_net_pf_app_start_ctrl(pf);
if (err)
goto err_free_irqs;
@ -774,12 +768,20 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_stop_app;
err = nfp_net_pf_app_start(pf);
if (err)
goto err_clean_vnics;
mutex_unlock(&pf->lock);
return 0;
err_clean_vnics:
list_for_each_entry(nn, &pf->vnics, vnic_list)
if (nfp_net_is_data_vnic(nn))
nfp_net_pf_clean_vnic(pf, nn);
err_stop_app:
nfp_net_pf_app_stop(pf);
nfp_net_pf_app_stop_ctrl(pf);
err_free_irqs:
nfp_net_pf_free_irqs(pf);
err_free_vnics:
@ -803,6 +805,8 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
if (list_empty(&pf->vnics))
goto out;
nfp_net_pf_app_stop(pf);
list_for_each_entry(nn, &pf->vnics, vnic_list)
if (nfp_net_is_data_vnic(nn))
nfp_net_pf_clean_vnic(pf, nn);


@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header(
seg_hdr->cookie = MPI_COREDUMP_COOKIE;
seg_hdr->segNum = seg_number;
seg_hdr->segSize = seg_size;
memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
}
/*


@ -6863,8 +6863,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
tp->TxDescArray + entry);
if (skb) {
tp->dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
dev_consume_skb_any(skb);
tx_skb->skb = NULL;
}
}
@ -7319,7 +7318,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
tp->tx_stats.packets++;
tp->tx_stats.bytes += tx_skb->skb->len;
u64_stats_update_end(&tp->tx_stats.syncp);
dev_kfree_skb_any(tx_skb->skb);
dev_consume_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
}
dirty_tx++;


@ -47,6 +47,8 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev,
plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
sizeof(*plat->mdio_bus_data),
GFP_KERNEL);
if (!plat->mdio_bus_data)
return -ENOMEM;
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
if (!dma_cfg)


@ -269,7 +269,10 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
ctrl |= val << reg_shift;
if (dwmac->f2h_ptp_ref_clk) {
if (dwmac->f2h_ptp_ref_clk ||
phymode == PHY_INTERFACE_MODE_MII ||
phymode == PHY_INTERFACE_MODE_GMII ||
phymode == PHY_INTERFACE_MODE_SGMII) {
ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
&module);


@ -979,14 +979,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
}
static const struct of_device_id sun8i_dwmac_match[] = {
{ .compatible = "allwinner,sun8i-h3-emac",
.data = &emac_variant_h3 },
{ .compatible = "allwinner,sun8i-v3s-emac",
.data = &emac_variant_v3s },
{ .compatible = "allwinner,sun8i-a83t-emac",
.data = &emac_variant_a83t },
{ .compatible = "allwinner,sun50i-a64-emac",
.data = &emac_variant_a64 },
{ }
};
MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);


@ -96,7 +96,7 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
if (of_machine_is_compatible("ti,dra7"))
return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr);
dev_err(dev, "incompatible machine/device type for reading mac address\n");
dev_info(dev, "incompatible machine/device type for reading mac address\n");
return -ENOENT;
}
EXPORT_SYMBOL_GPL(ti_cm_get_macid);


@ -1269,7 +1269,12 @@ static void netvsc_link_change(struct work_struct *w)
bool notify = false, reschedule = false;
unsigned long flags, next_reconfig, delay;
rtnl_lock();
/* if changes are happening, comeback later */
if (!rtnl_trylock()) {
schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
return;
}
net_device = rtnl_dereference(ndev_ctx->nvdev);
if (!net_device)
goto out_unlock;


@ -3521,6 +3521,7 @@ module_init(macsec_init);
module_exit(macsec_exit);
MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");
MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");


@ -749,9 +749,6 @@ void phy_stop_machine(struct phy_device *phydev)
if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
/* Now we can run the state machine synchronously */
phy_state_machine(&phydev->state_queue.work);
}
/**


@ -864,15 +864,17 @@ EXPORT_SYMBOL(phy_attached_info);
#define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)"
void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
{
const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
if (!fmt) {
dev_info(&phydev->mdio.dev, ATTACHED_FMT "\n",
phydev->drv->name, phydev_name(phydev),
drv_name, phydev_name(phydev),
phydev->irq);
} else {
va_list ap;
dev_info(&phydev->mdio.dev, ATTACHED_FMT,
phydev->drv->name, phydev_name(phydev),
drv_name, phydev_name(phydev),
phydev->irq);
va_start(ap, fmt);


@ -1758,6 +1758,13 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long)&wwan_noarp_info,
},
/* u-blox TOBY-L4 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1546, 0x1010,
USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
},
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),


@ -1058,7 +1058,7 @@ static void free_old_xmit_skbs(struct send_queue *sq)
bytes += skb->len;
packets++;
dev_kfree_skb_any(skb);
dev_consume_skb_any(skb);
}
/* Avoid overhead when no packets have been processed

View File

@ -787,6 +787,8 @@ int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
void iwl_pcie_rx_allocator_work(struct work_struct *data);
/* common functions that are used by gen2 transport */
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);


@ -597,7 +597,7 @@ static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
rxq->free_count += RX_CLAIM_REQ_ALLOC;
}
static void iwl_pcie_rx_allocator_work(struct work_struct *data)
void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
struct iwl_rb_allocator *rba_p =
container_of(data, struct iwl_rb_allocator, rx_alloc);
@ -900,10 +900,6 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
return err;
}
def_rxq = trans_pcie->rxq;
if (!rba->alloc_wq)
rba->alloc_wq = alloc_workqueue("rb_allocator",
WQ_HIGHPRI | WQ_UNBOUND, 1);
INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
spin_lock(&rba->lock);
atomic_set(&rba->req_pending, 0);
@ -1017,10 +1013,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
}
cancel_work_sync(&rba->rx_alloc);
if (rba->alloc_wq) {
destroy_workqueue(rba->alloc_wq);
rba->alloc_wq = NULL;
}
iwl_pcie_free_rbs_pool(trans);


@ -1786,6 +1786,11 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_tx_free(trans);
iwl_pcie_rx_free(trans);
if (trans_pcie->rba.alloc_wq) {
destroy_workqueue(trans_pcie->rba.alloc_wq);
trans_pcie->rba.alloc_wq = NULL;
}
if (trans_pcie->msix_enabled) {
for (i = 0; i < trans_pcie->alloc_vecs; i++) {
irq_set_affinity_hint(
@ -3169,6 +3174,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->inta_mask = CSR_INI_SET_MASK;
}
trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
WQ_HIGHPRI | WQ_UNBOUND, 1);
INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
#ifdef CONFIG_IWLWIFI_PCIE_RTPM
trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
#else


@ -1571,6 +1571,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
wl->state = WL1251_STATE_OFF;
mutex_init(&wl->mutex);
spin_lock_init(&wl->wl_lock);
wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;


@ -673,9 +673,7 @@ enum mlx5_device_state {
};
enum mlx5_interface_state {
MLX5_INTERFACE_STATE_DOWN = BIT(0),
MLX5_INTERFACE_STATE_UP = BIT(1),
MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
MLX5_INTERFACE_STATE_UP = BIT(0),
};
enum mlx5_pci_status {


@ -3866,6 +3866,8 @@ int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
struct net_device *upper_dev);
bool netdev_has_any_upper_dev(struct net_device *dev);
void *netdev_lower_get_next_private(struct net_device *dev,
struct list_head **iter);
void *netdev_lower_get_next_private_rcu(struct net_device *dev,


@ -973,7 +973,23 @@ int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int skb_pad(struct sk_buff *skb, int pad);
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
/**
* skb_pad - zero pad the tail of an skb
* @skb: buffer to pad
* @pad: space to pad
*
* Ensure that a buffer is followed by a padding area that is zero
* filled. Used by network drivers which may DMA or transfer data
* beyond the buffer end onto the wire.
*
* May return error in out of memory cases. The skb is freed on error.
*/
static inline int skb_pad(struct sk_buff *skb, int pad)
{
return __skb_pad(skb, pad, true);
}
#define dev_kfree_skb(a) consume_skb(a)
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
@ -2821,6 +2837,31 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
return skb_pad(skb, len - size);
}
/**
* skb_put_padto - increase size and pad an skbuff up to a minimal size
* @skb: buffer to pad
* @len: minimal length
* @free_on_error: free buffer on error
*
* Pads up a buffer to ensure the trailing bytes exist and are
* blanked. If the buffer already contains sufficient data it
* is untouched. Otherwise it is extended. Returns zero on
* success. The skb is freed on error if @free_on_error is true.
*/
static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
bool free_on_error)
{
unsigned int size = skb->len;
if (unlikely(size < len)) {
len -= size;
if (__skb_pad(skb, len, free_on_error))
return -ENOMEM;
__skb_put(skb, len);
}
return 0;
}
/**
* skb_put_padto - increase size and pad an skbuff up to a minimal size
* @skb: buffer to pad
@ -2833,15 +2874,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
*/
static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
{
unsigned int size = skb->len;
if (unlikely(size < len)) {
len -= size;
if (skb_pad(skb, len))
return -ENOMEM;
__skb_put(skb, len);
}
return 0;
return __skb_put_padto(skb, len, true);
}
static inline int skb_add_data(struct sk_buff *skb,


@ -70,6 +70,7 @@ struct fib6_node {
__u16 fn_flags;
int fn_sernum;
struct rt6_info *rr_ptr;
struct rcu_head rcu;
};
#ifndef CONFIG_IPV6_SUBTREES
@ -104,7 +105,7 @@ struct rt6_info {
* the same cache line.
*/
struct fib6_table *rt6i_table;
struct fib6_node *rt6i_node;
struct fib6_node __rcu *rt6i_node;
struct in6_addr rt6i_gateway;
@ -167,13 +168,40 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
rt0->rt6i_flags |= RTF_EXPIRES;
}
/* Function to safely get fn->sernum for passed in rt
* and store result in passed in cookie.
* Return true if we can get cookie safely
* Return false if not
*/
static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
u32 *cookie)
{
struct fib6_node *fn;
bool status = false;
rcu_read_lock();
fn = rcu_dereference(rt->rt6i_node);
if (fn) {
*cookie = fn->fn_sernum;
status = true;
}
rcu_read_unlock();
return status;
}
static inline u32 rt6_get_cookie(const struct rt6_info *rt)
{
u32 cookie = 0;
if (rt->rt6i_flags & RTF_PCPU ||
(unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
rt = (struct rt6_info *)(rt->dst.from);
return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
rt6_get_cookie_safe(rt, &cookie);
return cookie;
}
static inline void ip6_rt_put(struct rt6_info *rt)


@ -101,6 +101,13 @@ struct Qdisc {
spinlock_t busylock ____cacheline_aligned_in_smp;
};
static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_BUILTIN)
return;
refcount_inc(&qdisc->refcnt);
}
static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;


@ -1004,9 +1004,7 @@ void tcp_get_default_congestion_control(char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load);
void tcp_reinit_congestion_control(struct sock *sk,
const struct tcp_congestion_ops *ca);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);


@ -260,7 +260,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
}
void udp_v4_early_demux(struct sk_buff *skb);
void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
int (*saddr_cmp)(const struct sock *,
const struct sock *));


@ -652,12 +652,27 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
}
}
static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
{
return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
BITS_PER_LONG == 64;
}
static u32 htab_size_value(const struct bpf_htab *htab, bool percpu)
{
u32 size = htab->map.value_size;
if (percpu || fd_htab_map_needs_adjust(htab))
size = round_up(size, 8);
return size;
}
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
void *value, u32 key_size, u32 hash,
bool percpu, bool onallcpus,
struct htab_elem *old_elem)
{
u32 size = htab->map.value_size;
u32 size = htab_size_value(htab, percpu);
bool prealloc = htab_is_prealloc(htab);
struct htab_elem *l_new, **pl_new;
void __percpu *pptr;
@ -696,9 +711,6 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
memcpy(l_new->key, key, key_size);
if (percpu) {
/* round up value_size to 8 bytes */
size = round_up(size, 8);
if (prealloc) {
pptr = htab_elem_get_ptr(l_new, key_size);
} else {
@ -1209,17 +1221,9 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
{
struct bpf_map *map;
if (attr->value_size != sizeof(u32))
return ERR_PTR(-EINVAL);
/* pointer is stored internally */
attr->value_size = sizeof(void *);
map = htab_map_alloc(attr);
attr->value_size = sizeof(u32);
return map;
return htab_map_alloc(attr);
}
static void fd_htab_map_free(struct bpf_map *map)
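
For context, userspace still creates a hash-of-maps with a 4-byte value (the fd of the inner map); only the kernel-internal element size is rounded up to 8 bytes now, rather than adjusting attr->value_size around htab_map_alloc() as fd_htab_map_alloc() used to. A minimal creation sketch using the raw syscall (inner_fd is assumed to be the fd of an already created map; needs <linux/bpf.h>, <sys/syscall.h> and <unistd.h>):

        union bpf_attr attr = {
                .map_type     = BPF_MAP_TYPE_HASH_OF_MAPS,
                .key_size     = sizeof(__u32),
                .value_size   = sizeof(__u32),  /* still fd-sized from userspace */
                .max_entries  = 8,
                .inner_map_fd = inner_fd,
        };
        int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));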


@ -53,6 +53,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
brstats->tx_bytes += skb->len;
u64_stats_update_end(&brstats->syncp);
#ifdef CONFIG_NET_SWITCHDEV
skb->offload_fwd_mark = 0;
#endif
BR_INPUT_SKB_CB(skb)->brdev = dev;
skb_reset_mac_header(skb);


@ -115,7 +115,7 @@ br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
void
br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
{
if (!fdb->added_by_user)
if (!fdb->added_by_user || !fdb->dst)
return;
switch (type) {


@ -362,7 +362,7 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
if (flags & MSG_PEEK) {
err = -ENOENT;
spin_lock_bh(&sk_queue->lock);
if (skb == skb_peek(sk_queue)) {
if (skb->next) {
__skb_unlink(skb, sk_queue);
refcount_dec(&skb->users);
if (destructor)


@ -5289,6 +5289,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
* Ideally, a new ndo_busy_poll_stop() could avoid another round.
*/
rc = napi->poll(napi, BUSY_POLL_BUDGET);
trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
netpoll_poll_unlock(have_poll_lock);
if (rc == BUSY_POLL_BUDGET)
__napi_schedule(napi);
@ -5667,12 +5668,13 @@ EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
* Find out if a device is linked to an upper device and return true in case
* it is. The caller must hold the RTNL lock.
*/
static bool netdev_has_any_upper_dev(struct net_device *dev)
bool netdev_has_any_upper_dev(struct net_device *dev)
{
ASSERT_RTNL();
return !list_empty(&dev->adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);
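
Exporting the helper lets drivers veto stacking configurations they cannot offload. A driver-side sketch (hypothetical function; RTNL must be held, as the ASSERT_RTNL() above requires):

static int example_port_can_offload(struct net_device *dev)
{
        /* refuse to offload a device that already has upper devices */
        if (netdev_has_any_upper_dev(dev))
                return -EINVAL;
        return 0;
}
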
/**
* netdev_master_upper_dev_get - Get master upper device


@ -2836,15 +2836,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
sk->sk_prot->setsockopt == tcp_setsockopt) {
if (optname == TCP_CONGESTION) {
char name[TCP_CA_NAME_MAX];
bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;
strncpy(name, optval, min_t(long, optlen,
TCP_CA_NAME_MAX-1));
name[TCP_CA_NAME_MAX-1] = 0;
ret = tcp_set_congestion_control(sk, name, false);
if (!ret && bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN)
/* replacing an existing ca */
tcp_reinit_congestion_control(sk,
inet_csk(sk)->icsk_ca_ops);
ret = tcp_set_congestion_control(sk, name, false, reinit);
} else {
struct tcp_sock *tp = tcp_sk(sk);
@ -2872,7 +2869,6 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
ret = -EINVAL;
}
}
ret = -EINVAL;
#endif
} else {
ret = -EINVAL;


@ -1363,18 +1363,20 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
EXPORT_SYMBOL(skb_copy_expand);
/**
* skb_pad - zero pad the tail of an skb
* __skb_pad - zero pad the tail of an skb
* @skb: buffer to pad
* @pad: space to pad
* @free_on_error: free buffer on error
*
* Ensure that a buffer is followed by a padding area that is zero
* filled. Used by network drivers which may DMA or transfer data
* beyond the buffer end onto the wire.
*
* May return error in out of memory cases. The skb is freed on error.
* May return error in out of memory cases. The skb is freed on error
* if @free_on_error is true.
*/
int skb_pad(struct sk_buff *skb, int pad)
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
{
int err;
int ntail;
@ -1403,10 +1405,11 @@ int skb_pad(struct sk_buff *skb, int pad)
return 0;
free_skb:
kfree_skb(skb);
if (free_on_error)
kfree_skb(skb);
return err;
}
EXPORT_SYMBOL(skb_pad);
EXPORT_SYMBOL(__skb_pad);
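
skb_pad() presumably remains available as a two-argument wrapper passing free_on_error = true (the skbuff.h side of this change is not shown in the diff), so existing callers keep the old "freed on error" semantics. Callers that must keep ownership of the skb on failure, such as the ksz_xmit() hunk below, pass false. A sketch of the opt-out case (hypothetical driver code):

        /* pad, but keep ownership of the skb if allocation fails */
        if (__skb_pad(skb, pad, false)) {
                /* skb was NOT freed here; the caller decides its fate */
                return NULL;
        }
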
/**
* pskb_put - add data to the tail of a potentially fragmented buffer


@ -577,7 +577,7 @@ static int dsa_dst_parse(struct dsa_switch_tree *dst)
return err;
}
if (!dst->cpu_dp->netdev) {
if (!dst->cpu_dp) {
pr_warn("Tree has no master device\n");
return -EINVAL;
}


@ -42,7 +42,8 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
padlen = (skb->len >= ETH_ZLEN) ? 0 : ETH_ZLEN - skb->len;
if (skb_tailroom(skb) >= padlen + KSZ_INGRESS_TAG_LEN) {
if (skb_put_padto(skb, skb->len + padlen))
/* Let dsa_slave_xmit() free skb */
if (__skb_put_padto(skb, skb->len + padlen, false))
return NULL;
nskb = skb;
@ -60,12 +61,13 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
skb_transport_header(skb) - skb->head);
skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
if (skb_put_padto(nskb, nskb->len + padlen)) {
kfree_skb(nskb);
/* Let skb_put_padto() free nskb, and let dsa_slave_xmit() free
* skb
*/
if (skb_put_padto(nskb, nskb->len + padlen))
return NULL;
}
kfree_skb(skb);
consume_skb(skb);
}
tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN);


@ -40,7 +40,7 @@ static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
skb_set_network_header(nskb, skb_network_header(skb) - skb->head);
skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head);
skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
kfree_skb(skb);
consume_skb(skb);
if (padlen) {
skb_put_zero(nskb, padlen);


@ -314,7 +314,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr);
skb_put_padto(skb, ETH_ZLEN + HSR_HLEN);
if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
return;
hsr_forward_skb(skb, master);
return;


@ -258,7 +258,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
esp_output_udp_encap(x, skb, esp);
if (!skb_cloned(skb)) {
if (tailen <= skb_availroom(skb)) {
if (tailen <= skb_tailroom(skb)) {
nfrags = 1;
trailer = skb;
tail = skb_tail_pointer(trailer);
@ -292,8 +292,6 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
kunmap_atomic(vaddr);
spin_unlock_bh(&x->lock);
nfrags = skb_shinfo(skb)->nr_frags;
__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
@ -301,6 +299,9 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
skb_shinfo(skb)->nr_frags = ++nfrags;
pfrag->offset = pfrag->offset + allocsize;
spin_unlock_bh(&x->lock);
nfrags++;
skb->len += tailen;
@ -381,7 +382,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
(unsigned char *)esph - skb->data,
assoclen + ivlen + esp->clen + alen);
if (unlikely(err < 0))
goto error;
goto error_free;
if (!esp->inplace) {
int allocsize;
@ -392,7 +393,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
spin_lock_bh(&x->lock);
if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
spin_unlock_bh(&x->lock);
goto error;
goto error_free;
}
skb_shinfo(skb)->nr_frags = 1;
@ -409,7 +410,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
(unsigned char *)esph - skb->data,
assoclen + ivlen + esp->clen + alen);
if (unlikely(err < 0))
goto error;
goto error_free;
}
if ((x->props.flags & XFRM_STATE_ESN))
@ -442,8 +443,9 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
if (sg != dsg)
esp_ssg_unref(x, tmp);
kfree(tmp);
error_free:
kfree(tmp);
error:
return err;
}
@ -695,8 +697,10 @@ skip_cow:
sg_init_table(sg, nfrags);
err = skb_to_sgvec(skb, sg, 0, skb->len);
if (unlikely(err < 0))
if (unlikely(err < 0)) {
kfree(tmp);
goto out;
}
skb->ip_summed = CHECKSUM_NONE;


@ -257,7 +257,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
err = esp_output_tail(x, skb, &esp);
if (err < 0)
if (err)
return err;
secpath_reset(skb);


@ -268,14 +268,14 @@ unsigned int arpt_do_table(struct sk_buff *skb,
acpar.targinfo = t->data;
verdict = t->u.kernel.target->target(skb, &acpar);
/* Target might have changed stuff. */
arp = arp_hdr(skb);
if (verdict == XT_CONTINUE)
if (verdict == XT_CONTINUE) {
/* Target might have changed stuff. */
arp = arp_hdr(skb);
e = arpt_next_entry(e);
else
} else {
/* Verdict */
break;
}
} while (!acpar.hotdrop);
xt_write_recseq_end(addend);
local_bh_enable();


@ -352,13 +352,14 @@ ipt_do_table(struct sk_buff *skb,
acpar.targinfo = t->data;
verdict = t->u.kernel.target->target(skb, &acpar);
/* Target might have changed stuff. */
ip = ip_hdr(skb);
if (verdict == XT_CONTINUE)
if (verdict == XT_CONTINUE) {
/* Target might have changed stuff. */
ip = ip_hdr(skb);
e = ipt_next_entry(e);
else
} else {
/* Verdict */
break;
}
} while (!acpar.hotdrop);
xt_write_recseq_end(addend);


@ -117,7 +117,8 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
* functions are also incrementing the refcount on their own,
* so it's safe to remove the entry even if it's in use. */
#ifdef CONFIG_PROC_FS
proc_remove(c->pde);
if (cn->procdir)
proc_remove(c->pde);
#endif
return;
}
@ -815,6 +816,7 @@ static void clusterip_net_exit(struct net *net)
#ifdef CONFIG_PROC_FS
struct clusterip_net *cn = net_generic(net, clusterip_net_id);
proc_remove(cn->procdir);
cn->procdir = NULL;
#endif
nf_unregister_net_hook(net, &cip_arp_ops);
}


@ -2481,7 +2481,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
name[val] = 0;
lock_sock(sk);
err = tcp_set_congestion_control(sk, name, true);
err = tcp_set_congestion_control(sk, name, true, true);
release_sock(sk);
return err;
}


@ -189,8 +189,8 @@ void tcp_init_congestion_control(struct sock *sk)
INET_ECN_dontxmit(sk);
}
void tcp_reinit_congestion_control(struct sock *sk,
const struct tcp_congestion_ops *ca)
static void tcp_reinit_congestion_control(struct sock *sk,
const struct tcp_congestion_ops *ca)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@ -338,7 +338,7 @@ out:
* tcp_reinit_congestion_control (if the current congestion control was
* already initialized.
*/
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load)
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit)
{
struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_congestion_ops *ca;
@ -360,9 +360,18 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load)
if (!ca) {
err = -ENOENT;
} else if (!load) {
icsk->icsk_ca_ops = ca;
if (!try_module_get(ca->owner))
const struct tcp_congestion_ops *old_ca = icsk->icsk_ca_ops;
if (try_module_get(ca->owner)) {
if (reinit) {
tcp_reinit_congestion_control(sk, ca);
} else {
icsk->icsk_ca_ops = ca;
module_put(old_ca->owner);
}
} else {
err = -EBUSY;
}
} else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))) {
err = -EPERM;


@ -1176,7 +1176,7 @@ static void udp_set_dev_scratch(struct sk_buff *skb)
scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
scratch->is_linear = !skb_is_nonlinear(skb);
#endif
if (likely(!skb->_skb_refdst))
if (likely(!skb->_skb_refdst && !skb_sec_path(skb)))
scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
}
@ -1929,14 +1929,16 @@ drop:
/* For TCP sockets, sk_rx_dst is protected by socket lock
* For UDP, we use xchg() to guard against concurrent changes.
*/
void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
struct dst_entry *old;
if (dst_hold_safe(dst)) {
old = xchg(&sk->sk_rx_dst, dst);
dst_release(old);
return old != dst;
}
return false;
}
EXPORT_SYMBOL(udp_sk_rx_dst_set);


@ -5556,7 +5556,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
* our DAD process, so we don't need
* to do it again
*/
if (!(ifp->rt->rt6i_node))
if (!rcu_access_pointer(ifp->rt->rt6i_node))
ip6_ins_rt(ifp->rt);
if (ifp->idev->cnf.forwarding)
addrconf_join_anycast(ifp);


@ -226,7 +226,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
int tailen = esp->tailen;
if (!skb_cloned(skb)) {
if (tailen <= skb_availroom(skb)) {
if (tailen <= skb_tailroom(skb)) {
nfrags = 1;
trailer = skb;
tail = skb_tail_pointer(trailer);
@ -260,8 +260,6 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
kunmap_atomic(vaddr);
spin_unlock_bh(&x->lock);
nfrags = skb_shinfo(skb)->nr_frags;
__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
@ -269,6 +267,9 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
skb_shinfo(skb)->nr_frags = ++nfrags;
pfrag->offset = pfrag->offset + allocsize;
spin_unlock_bh(&x->lock);
nfrags++;
skb->len += tailen;
@ -345,7 +346,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
(unsigned char *)esph - skb->data,
assoclen + ivlen + esp->clen + alen);
if (unlikely(err < 0))
goto error;
goto error_free;
if (!esp->inplace) {
int allocsize;
@ -356,7 +357,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
spin_lock_bh(&x->lock);
if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
spin_unlock_bh(&x->lock);
goto error;
goto error_free;
}
skb_shinfo(skb)->nr_frags = 1;
@ -373,7 +374,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
(unsigned char *)esph - skb->data,
assoclen + ivlen + esp->clen + alen);
if (unlikely(err < 0))
goto error;
goto error_free;
}
if ((x->props.flags & XFRM_STATE_ESN))
@ -406,8 +407,9 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
if (sg != dsg)
esp_ssg_unref(x, tmp);
kfree(tmp);
error_free:
kfree(tmp);
error:
return err;
}


@ -286,7 +286,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
err = esp6_output_tail(x, skb, &esp);
if (err < 0)
if (err)
return err;
secpath_reset(skb);


@ -148,11 +148,23 @@ static struct fib6_node *node_alloc(void)
return fn;
}
static void node_free(struct fib6_node *fn)
static void node_free_immediate(struct fib6_node *fn)
{
kmem_cache_free(fib6_node_kmem, fn);
}
static void node_free_rcu(struct rcu_head *head)
{
struct fib6_node *fn = container_of(head, struct fib6_node, rcu);
kmem_cache_free(fib6_node_kmem, fn);
}
static void node_free(struct fib6_node *fn)
{
call_rcu(&fn->rcu, node_free_rcu);
}
static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
{
int cpu;
@ -601,9 +613,9 @@ insert_above:
if (!in || !ln) {
if (in)
node_free(in);
node_free_immediate(in);
if (ln)
node_free(ln);
node_free_immediate(ln);
return ERR_PTR(-ENOMEM);
}
@ -877,7 +889,7 @@ add:
rt->dst.rt6_next = iter;
*ins = rt;
rt->rt6i_node = fn;
rcu_assign_pointer(rt->rt6i_node, fn);
atomic_inc(&rt->rt6i_ref);
if (!info->skip_notify)
inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
@ -903,7 +915,7 @@ add:
return err;
*ins = rt;
rt->rt6i_node = fn;
rcu_assign_pointer(rt->rt6i_node, fn);
rt->dst.rt6_next = iter->dst.rt6_next;
atomic_inc(&rt->rt6i_ref);
if (!info->skip_notify)
@ -1038,7 +1050,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
root, and then (in failure) stale node
in main tree.
*/
node_free(sfn);
node_free_immediate(sfn);
err = PTR_ERR(sn);
goto failure;
}
@ -1468,8 +1480,9 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
int fib6_del(struct rt6_info *rt, struct nl_info *info)
{
struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
struct net *net = info->nl_net;
struct fib6_node *fn = rt->rt6i_node;
struct rt6_info **rtp;
#if RT6_DEBUG >= 2
@ -1658,7 +1671,9 @@ static int fib6_clean_node(struct fib6_walker *w)
if (res) {
#if RT6_DEBUG >= 2
pr_debug("%s: del failed: rt=%p@%p err=%d\n",
__func__, rt, rt->rt6i_node, res);
__func__, rt,
rcu_access_pointer(rt->rt6i_node),
res);
#endif
continue;
}
@ -1780,8 +1795,10 @@ static int fib6_age(struct rt6_info *rt, void *arg)
}
gc_args->more++;
} else if (rt->rt6i_flags & RTF_CACHE) {
if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout))
rt->dst.obsolete = DST_OBSOLETE_KILL;
if (atomic_read(&rt->dst.__refcnt) == 1 &&
time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
rt->dst.obsolete == DST_OBSOLETE_KILL) {
RT6_TRACE("aging clone %p\n", rt);
return -1;
} else if (rt->rt6i_flags & RTF_GATEWAY) {


@ -242,7 +242,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
pktopt = xchg(&np->pktoptions, NULL);
kfree_skb(pktopt);
sk->sk_destruct = inet_sock_destruct;
/*
* ... and add it to the refcnt debug socks count
* in the new family. -acme


@ -86,7 +86,6 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
while (offset <= packet_len) {
struct ipv6_opt_hdr *exthdr;
unsigned int len;
switch (**nexthdr) {
@ -112,10 +111,9 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset);
len = ipv6_optlen(exthdr);
if (len + offset >= IPV6_MAXPLEN)
offset += ipv6_optlen(exthdr);
if (offset > IPV6_MAXPLEN)
return -EINVAL;
offset += len;
*nexthdr = &exthdr->nexthdr;
}


@ -440,7 +440,8 @@ static bool rt6_check_expired(const struct rt6_info *rt)
if (time_after(jiffies, rt->dst.expires))
return true;
} else if (rt->dst.from) {
return rt6_check_expired((struct rt6_info *) rt->dst.from);
return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
rt6_check_expired((struct rt6_info *)rt->dst.from);
}
return false;
}
@ -1289,7 +1290,9 @@ static void rt6_dst_from_metrics_check(struct rt6_info *rt)
static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
{
if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
u32 rt_cookie = 0;
if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
return NULL;
if (rt6_check_expired(rt))
@ -1357,8 +1360,14 @@ static void ip6_link_failure(struct sk_buff *skb)
if (rt->rt6i_flags & RTF_CACHE) {
if (dst_hold_safe(&rt->dst))
ip6_del_rt(rt);
} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
rt->rt6i_node->fn_sernum = -1;
} else {
struct fib6_node *fn;
rcu_read_lock();
fn = rcu_dereference(rt->rt6i_node);
if (fn && (rt->rt6i_flags & RTF_DEFAULT))
fn->fn_sernum = -1;
rcu_read_unlock();
}
}
}
@ -1375,7 +1384,8 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
return !(rt->rt6i_flags & RTF_CACHE) &&
(rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
(rt->rt6i_flags & RTF_PCPU ||
rcu_access_pointer(rt->rt6i_node));
}
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,


@ -768,6 +768,15 @@ start_lookup:
return 0;
}
static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
if (udp_sk_rx_dst_set(sk, dst)) {
const struct rt6_info *rt = (const struct rt6_info *)dst;
inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
}
}
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
@ -817,7 +826,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int ret;
if (unlikely(sk->sk_rx_dst != dst))
udp_sk_rx_dst_set(sk, dst);
udp6_sk_rx_dst_set(sk, dst);
ret = udpv6_queue_rcv_skb(sk, skb);
sock_put(sk);


@ -1383,6 +1383,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
if (!csk)
return -EINVAL;
/* We must prevent loops or risk deadlock ! */
if (csk->sk_family == PF_KCM)
return -EOPNOTSUPP;
psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
if (!psock)
return -ENOMEM;


@ -113,7 +113,6 @@ struct l2tp_net {
spinlock_t l2tp_session_hlist_lock;
};
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
{
@ -127,39 +126,6 @@ static inline struct l2tp_net *l2tp_pernet(const struct net *net)
return net_generic(net, l2tp_net_id);
}
/* Tunnel reference counts. Incremented per session that is added to
* the tunnel.
*/
static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
{
refcount_inc(&tunnel->ref_count);
}
static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
{
if (refcount_dec_and_test(&tunnel->ref_count))
l2tp_tunnel_free(tunnel);
}
#ifdef L2TP_REFCNT_DEBUG
#define l2tp_tunnel_inc_refcount(_t) \
do { \
pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", \
__func__, __LINE__, (_t)->name, \
refcount_read(&_t->ref_count)); \
l2tp_tunnel_inc_refcount_1(_t); \
} while (0)
#define l2tp_tunnel_dec_refcount(_t) \
do { \
pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \
__func__, __LINE__, (_t)->name, \
refcount_read(&_t->ref_count)); \
l2tp_tunnel_dec_refcount_1(_t); \
} while (0)
#else
#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
#endif
/* Session hash global list for L2TPv3.
* The session_id SHOULD be random according to RFC3931, but several
* L2TP implementations use incrementing session_ids. So we do a real
@ -229,6 +195,27 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
}
/* Lookup a tunnel. A new reference is held on the returned tunnel. */
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
{
const struct l2tp_net *pn = l2tp_pernet(net);
struct l2tp_tunnel *tunnel;
rcu_read_lock_bh();
list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
if (tunnel->tunnel_id == tunnel_id) {
l2tp_tunnel_inc_refcount(tunnel);
rcu_read_unlock_bh();
return tunnel;
}
}
rcu_read_unlock_bh();
return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
/* Lookup a session. A new reference is held on the returned session.
* Optionally calls session->ref() too if do_ref is true.
*/
@ -1348,17 +1335,6 @@ static void l2tp_udp_encap_destroy(struct sock *sk)
}
}
/* Really kill the tunnel.
* Come here only when all sessions have been cleared from the tunnel.
*/
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
BUG_ON(refcount_read(&tunnel->ref_count) != 0);
BUG_ON(tunnel->sock != NULL);
l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
kfree_rcu(tunnel, rcu);
}
/* Workqueue tunnel deletion function */
static void l2tp_tunnel_del_work(struct work_struct *work)
{
@ -1844,6 +1820,8 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
l2tp_session_set_header_len(session, tunnel->version);
refcount_set(&session->ref_count, 1);
err = l2tp_session_add_to_tunnel(tunnel, session);
if (err) {
kfree(session);
@ -1851,10 +1829,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
return ERR_PTR(err);
}
/* Bump the reference count. The session context is deleted
* only when this drops to zero.
*/
refcount_set(&session->ref_count, 1);
l2tp_tunnel_inc_refcount(tunnel);
/* Ensure tunnel socket isn't deleted */


@ -231,6 +231,8 @@ out:
return tunnel;
}
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
struct l2tp_session *l2tp_session_get(const struct net *net,
struct l2tp_tunnel *tunnel,
u32 session_id, bool do_ref);
@ -269,6 +271,17 @@ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
{
refcount_inc(&tunnel->ref_count);
}
static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
{
if (refcount_dec_and_test(&tunnel->ref_count))
kfree_rcu(tunnel, rcu);
}
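
With the refcount helpers inlined here, any code that looks up a tunnel by id can hold it safely for the duration of the operation; the l2tp_nl_cmd_*() handlers below are converted to this pattern (sketch):

        tunnel = l2tp_tunnel_get(net, tunnel_id);
        if (!tunnel)
                return -ENODEV;
        /* ... use the tunnel; it cannot be freed under us ... */
        l2tp_tunnel_dec_refcount(tunnel);
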
/* Session reference counts. Incremented when code obtains a reference
* to a session.
*/


@ -65,10 +65,12 @@ static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
(info->attrs[L2TP_ATTR_CONN_ID])) {
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
if (tunnel)
tunnel = l2tp_tunnel_get(net, tunnel_id);
if (tunnel) {
session = l2tp_session_get(net, tunnel, session_id,
do_ref);
l2tp_tunnel_dec_refcount(tunnel);
}
}
return session;
@ -271,8 +273,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
}
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
if (tunnel == NULL) {
tunnel = l2tp_tunnel_get(net, tunnel_id);
if (!tunnel) {
ret = -ENODEV;
goto out;
}
@ -282,6 +284,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
(void) l2tp_tunnel_delete(tunnel);
l2tp_tunnel_dec_refcount(tunnel);
out:
return ret;
}
@ -299,8 +303,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
}
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
if (tunnel == NULL) {
tunnel = l2tp_tunnel_get(net, tunnel_id);
if (!tunnel) {
ret = -ENODEV;
goto out;
}
@ -311,6 +315,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
tunnel, L2TP_CMD_TUNNEL_MODIFY);
l2tp_tunnel_dec_refcount(tunnel);
out:
return ret;
}
@ -438,34 +444,37 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[L2TP_ATTR_CONN_ID]) {
ret = -EINVAL;
goto out;
goto err;
}
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
if (tunnel == NULL) {
ret = -ENODEV;
goto out;
}
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
goto out;
goto err;
}
tunnel = l2tp_tunnel_get(net, tunnel_id);
if (!tunnel) {
ret = -ENODEV;
goto err_nlmsg;
}
ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
NLM_F_ACK, tunnel, L2TP_CMD_TUNNEL_GET);
if (ret < 0)
goto err_out;
goto err_nlmsg_tunnel;
l2tp_tunnel_dec_refcount(tunnel);
return genlmsg_unicast(net, msg, info->snd_portid);
err_out:
err_nlmsg_tunnel:
l2tp_tunnel_dec_refcount(tunnel);
err_nlmsg:
nlmsg_free(msg);
out:
err:
return ret;
}
@ -509,8 +518,9 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
ret = -EINVAL;
goto out;
}
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
tunnel = l2tp_tunnel_get(net, tunnel_id);
if (!tunnel) {
ret = -ENODEV;
goto out;
@ -518,24 +528,24 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
if (!info->attrs[L2TP_ATTR_SESSION_ID]) {
ret = -EINVAL;
goto out;
goto out_tunnel;
}
session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) {
ret = -EINVAL;
goto out;
goto out_tunnel;
}
peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]);
if (!info->attrs[L2TP_ATTR_PW_TYPE]) {
ret = -EINVAL;
goto out;
goto out_tunnel;
}
cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]);
if (cfg.pw_type >= __L2TP_PWTYPE_MAX) {
ret = -EINVAL;
goto out;
goto out_tunnel;
}
if (tunnel->version > 2) {
@ -557,7 +567,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]);
if (len > 8) {
ret = -EINVAL;
goto out;
goto out_tunnel;
}
cfg.cookie_len = len;
memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len);
@ -566,7 +576,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]);
if (len > 8) {
ret = -EINVAL;
goto out;
goto out_tunnel;
}
cfg.peer_cookie_len = len;
memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len);
@ -609,7 +619,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) ||
(l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) {
ret = -EPROTONOSUPPORT;
goto out;
goto out_tunnel;
}
/* Check that pseudowire-specific params are present */
@ -619,7 +629,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
case L2TP_PWTYPE_ETH_VLAN:
if (!info->attrs[L2TP_ATTR_VLAN_ID]) {
ret = -EINVAL;
goto out;
goto out_tunnel;
}
break;
case L2TP_PWTYPE_ETH:
@ -647,6 +657,8 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
}
}
out_tunnel:
l2tp_tunnel_dec_refcount(tunnel);
out:
return ret;
}


@ -441,7 +441,7 @@ nf_nat_setup_info(struct nf_conn *ct,
else
ct->status |= IPS_DST_NAT;
if (nfct_help(ct))
if (nfct_help(ct) && !nfct_seqadj(ct))
if (!nfct_seqadj_ext_add(ct))
return NF_DROP;
}


@ -305,7 +305,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
const struct nf_hook_ops *ops = &basechain->ops[0];
hook_mask = 1 << ops->hooknum;
if (!(hook_mask & target->hooks))
if (target->hooks && !(hook_mask & target->hooks))
return -EINVAL;
ret = nft_compat_chain_validate_dependency(target->table,
@ -484,7 +484,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
const struct nf_hook_ops *ops = &basechain->ops[0];
hook_mask = 1 << ops->hooknum;
if (!(hook_mask & match->hooks))
if (match->hooks && !(hook_mask & match->hooks))
return -EINVAL;
ret = nft_compat_chain_validate_dependency(match->table,


@ -65,19 +65,23 @@ static int nft_limit_init(struct nft_limit *limit,
limit->nsecs = unit * NSEC_PER_SEC;
if (limit->rate == 0 || limit->nsecs < unit)
return -EOVERFLOW;
limit->tokens = limit->tokens_max = limit->nsecs;
if (tb[NFTA_LIMIT_BURST]) {
u64 rate;
if (tb[NFTA_LIMIT_BURST])
limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
else
limit->burst = 0;
rate = limit->rate + limit->burst;
if (rate < limit->rate)
return -EOVERFLOW;
if (limit->rate + limit->burst < limit->rate)
return -EOVERFLOW;
/* The token bucket size limits the number of tokens can be
* accumulated. tokens_max specifies the bucket size.
* tokens_max = unit * (rate + burst) / rate.
*/
limit->tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
limit->rate);
limit->tokens_max = limit->tokens;
limit->rate = rate;
}
if (tb[NFTA_LIMIT_FLAGS]) {
u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS]));
@ -95,9 +99,8 @@ static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit *limit,
{
u32 flags = limit->invert ? NFT_LIMIT_F_INV : 0;
u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC);
u64 rate = limit->rate - limit->burst;
if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate),
if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(limit->rate),
NFTA_LIMIT_PAD) ||
nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs),
NFTA_LIMIT_PAD) ||


@ -2191,6 +2191,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
struct timespec ts;
__u32 ts_status;
bool is_drop_n_account = false;
bool do_vnet = false;
/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
* We may add members to them until current aligned size without forcing
@ -2241,8 +2242,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
netoff = TPACKET_ALIGN(po->tp_hdrlen +
(maclen < 16 ? 16 : maclen)) +
po->tp_reserve;
if (po->has_vnet_hdr)
if (po->has_vnet_hdr) {
netoff += sizeof(struct virtio_net_hdr);
do_vnet = true;
}
macoff = netoff - maclen;
}
if (po->tp_version <= TPACKET_V2) {
@ -2259,8 +2262,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
skb_set_owner_r(copy_skb, sk);
}
snaplen = po->rx_ring.frame_size - macoff;
if ((int)snaplen < 0)
if ((int)snaplen < 0) {
snaplen = 0;
do_vnet = false;
}
}
} else if (unlikely(macoff + snaplen >
GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
@ -2273,6 +2278,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
if (unlikely((int)snaplen < 0)) {
snaplen = 0;
macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
do_vnet = false;
}
}
spin_lock(&sk->sk_receive_queue.lock);
@ -2298,7 +2304,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
}
spin_unlock(&sk->sk_receive_queue.lock);
if (po->has_vnet_hdr) {
if (do_vnet) {
if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
sizeof(struct virtio_net_hdr),
vio_le(), true)) {


@ -215,9 +215,15 @@ static void tcf_chain_flush(struct tcf_chain *chain)
static void tcf_chain_destroy(struct tcf_chain *chain)
{
list_del(&chain->list);
tcf_chain_flush(chain);
kfree(chain);
/* May be already removed from the list by the previous call. */
if (!list_empty(&chain->list))
list_del_init(&chain->list);
/* There might still be a reference held when we got here from
* tcf_block_put. Wait for the user to drop reference before free.
*/
if (!chain->refcnt)
kfree(chain);
}
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
@ -288,8 +294,10 @@ void tcf_block_put(struct tcf_block *block)
if (!block)
return;
list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
list_for_each_entry_safe(chain, tmp, &block->chain_list, list) {
tcf_chain_flush(chain);
tcf_chain_destroy(chain);
}
kfree(block);
}
EXPORT_SYMBOL(tcf_block_put);


@ -836,7 +836,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
old = dev_graft_qdisc(dev_queue, new);
if (new && i > 0)
refcount_inc(&new->refcnt);
qdisc_refcount_inc(new);
if (!ingress)
qdisc_destroy(old);
@ -847,7 +847,7 @@ skip:
notify_and_destroy(net, skb, n, classid,
dev->qdisc, new);
if (new && !new->ops->attach)
refcount_inc(&new->refcnt);
qdisc_refcount_inc(new);
dev->qdisc = new ? : &noop_qdisc;
if (new && new->ops->attach)
@ -1256,7 +1256,7 @@ replay:
if (q == p ||
(p && check_loop(q, p, 0)))
return -ELOOP;
refcount_inc(&q->refcnt);
qdisc_refcount_inc(q);
goto graft;
} else {
if (!q)


@ -1139,6 +1139,13 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
struct tc_ratespec *r;
int err;
qdisc_watchdog_init(&q->watchdog, sch);
hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
q->delay_timer.function = cbq_undelay;
if (!opt)
return -EINVAL;
err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
if (err < 0)
return err;
@ -1177,9 +1184,6 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
q->link.avpkt = q->link.allot/2;
q->link.minidle = -0x7FFFFFFF;
qdisc_watchdog_init(&q->watchdog, sch);
hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
q->delay_timer.function = cbq_undelay;
q->toplevel = TC_CBQ_MAXLEVEL;
q->now = psched_get_time();


@ -491,10 +491,8 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
if (!q->flows)
return -ENOMEM;
q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL);
if (!q->backlogs) {
kvfree(q->flows);
if (!q->backlogs)
return -ENOMEM;
}
for (i = 0; i < q->flows_cnt; i++) {
struct fq_codel_flow *flow = q->flows + i;

Some files were not shown because too many files have changed in this diff.