Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) IPv6 gre tunnels end up with different default features enabled
    depending upon whether netlink or ioctls are used to bring them up.
    Fix from Alexey Kodanev.

 2) Fix read past end of user control message in RDS, from Avinash
    Repaka.

 3) Missing RCU barrier in mini qdisc code, from Cong Wang.

 4) Missing policy put when reusing per-cpu route entries, from Florian
    Westphal.

 5) Handle nested PCI errors properly in bnx2x driver, from Guilherme G.
    Piccoli.

 6) Run nested transport mode IPSEC packets via tasklet, from Herbert
    Xu.

 7) Fix handling poll() for stream sockets in tipc, from Parthasarathy
    Bhuvaragan.

 8) Fix two stack-out-of-bounds issues in IPSEC, from Steffen Klassert.

 9) Another zerocopy ubuf handling fix, from Willem de Bruijn.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (33 commits)
  strparser: Call sock_owned_by_user_nocheck
  sock: Add sock_owned_by_user_nocheck
  skbuff: in skb_copy_ubufs unclone before releasing zerocopy
  tipc: fix hanging poll() for stream sockets
  sctp: Replace use of sockets_allocated with specified macro.
  bnx2x: Improve reliability in case of nested PCI errors
  tg3: Enable PHY reset in MTU change path for 5720
  tg3: Add workaround to restrict 5762 MRRS to 2048
  tg3: Update copyright
  net: fec: unmap the xmit buffer that are not transferred by DMA
  tipc: fix tipc_mon_delete() oops in tipc_enable_bearer() error path
  tipc: error path leak fixes in tipc_enable_bearer()
  RDS: Check cmsg_len before dereferencing CMSG_DATA
  tcp: Avoid preprocessor directives in tracepoint macro args
  tipc: fix memory leak of group member when peer node is lost
  net: sched: fix possible null pointer deref in tcf_block_put
  tipc: base group replicast ack counter on number of actual receivers
  net_sched: fix a missing rcu barrier in mini_qdisc_pair_swap()
  net: phy: micrel: ksz9031: reconfigure autoneg after phy autoneg workaround
  ip6_gre: fix device features for ioctl setup
  ...
commit 2758b3e3e6
Author: Linus Torvalds
Date:   2017-12-28 23:20:21 -08:00

30 changed files with 297 additions and 120 deletions


@@ -3030,7 +3030,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 	del_timer_sync(&bp->timer);
 
-	if (IS_PF(bp)) {
+	if (IS_PF(bp) && !BP_NOMCP(bp)) {
 		/* Set ALWAYS_ALIVE bit in shmem */
 		bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
 		bnx2x_drv_pulse(bp);
@@ -3116,7 +3116,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 	bp->cnic_loaded = false;
 
 	/* Clear driver version indication in shmem */
-	if (IS_PF(bp))
+	if (IS_PF(bp) && !BP_NOMCP(bp))
 		bnx2x_update_mng_version(bp);
 
 	/* Check if there are pending parity attentions. If there are - set


@@ -9578,6 +9578,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp)
 	do {
 		bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
 
+		/* If we read all 0xFFs, means we are in PCI error state and
+		 * should bail out to avoid crashes on adapter's FW reads.
+		 */
+		if (bp->common.shmem_base == 0xFFFFFFFF) {
+			bp->flags |= NO_MCP_FLAG;
+			return -ENODEV;
+		}
+
 		if (bp->common.shmem_base) {
 			val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
 			if (val & SHR_MEM_VALIDITY_MB)
@@ -14320,7 +14329,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
 		BNX2X_ERR("IO slot reset --> driver unload\n");
 
 		/* MCP should have been reset; Need to wait for validity */
-		bnx2x_init_shmem(bp);
+		if (bnx2x_init_shmem(bp)) {
+			rtnl_unlock();
+			return PCI_ERS_RESULT_DISCONNECT;
+		}
 
 		if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
 			u32 v;
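
Both bnx2x hunks lean on the same PCI convention: a device that has dropped off the bus (or is frozen in an error state) answers every MMIO read with all ones, so a register that can never legitimately contain 0xFFFFFFFF doubles as an error probe. A minimal sketch of the idea, using a hypothetical helper that is not part of the driver:

	/* All-ones from a memory-mapped register usually means the PCI(e)
	 * device is gone or error-frozen; probe before trusting any read.
	 */
	static bool my_dev_reachable(void __iomem *reg)
	{
		return readl(reg) != 0xFFFFFFFF;
	}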


@@ -4,11 +4,13 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2014 Broadcom Corporation.
+ * Copyright (C) 2005-2016 Broadcom Corporation.
+ * Copyright (C) 2016-2017 Broadcom Limited.
  *
  * Firmware is:
  *	Derived from proprietary unpublished source code,
- *	Copyright (C) 2000-2003 Broadcom Corporation.
+ *	Copyright (C) 2000-2016 Broadcom Corporation.
+ *	Copyright (C) 2016-2017 Broadcom Ltd.
  *
  * Permission is hereby granted for the distribution of this firmware
  * data in hexadecimal or equivalent format, provided this copyright
@@ -10052,6 +10054,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
 	tw32(GRC_MODE, tp->grc_mode | val);
 
+	/* On one of the AMD platform, MRRS is restricted to 4000 because of
+	 * south bridge limitation. As a workaround, Driver is setting MRRS
+	 * to 2048 instead of default 4096.
+	 */
+	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
+		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
+		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
+	}
+
 	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
 	val = tr32(GRC_MISC_CFG);
 	val &= ~0xff;
@@ -14227,7 +14239,8 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 	 */
 	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
 	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
-	    tg3_asic_rev(tp) == ASIC_REV_5719)
+	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5720)
 		reset_phy = true;
 
 	err = tg3_restart_hw(tp, reset_phy);


@@ -5,7 +5,8 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2014 Broadcom Corporation.
+ * Copyright (C) 2007-2016 Broadcom Corporation.
+ * Copyright (C) 2016-2017 Broadcom Limited.
  */
 
 #ifndef _T3_H
@@ -96,6 +97,7 @@
 #define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR		0x0106
 #define TG3PCI_SUBDEVICE_ID_DELL_MERLOT		0x0109
 #define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT	0x010a
+#define TG3PCI_SUBDEVICE_ID_DELL_5762		0x07f0
 #define TG3PCI_SUBVENDOR_ID_COMPAQ		PCI_VENDOR_ID_COMPAQ
 #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE	0x007c
 #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2	0x009a
@@ -281,6 +283,9 @@
 #define TG3PCI_STD_RING_PROD_IDX	0x00000098 /* 64-bit */
 #define TG3PCI_RCV_RET_RING_CON_IDX	0x000000a0 /* 64-bit */
 /* 0xa8 --> 0xb8 unused */
+#define TG3PCI_DEV_STATUS_CTRL		0x000000b4
+#define  MAX_READ_REQ_SIZE_2048		 0x00004000
+#define  MAX_READ_REQ_MASK		 0x00007000
 #define TG3PCI_DUAL_MAC_CTRL		0x000000b8
 #define  DUAL_MAC_CTRL_CH_MASK		 0x00000003
 #define  DUAL_MAC_CTRL_ID		 0x00000004
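
For context on the new constants: the PCIe Device Control register keeps Max_Read_Request_Size in bits 14:12, which is where the 0x00007000 mask comes from, and 0x00004000 puts 100b into that field:

	/* MRRS encoding (PCIe Device Control register, bits 14:12):
	 *   000b = 128B   001b = 256B    010b = 512B
	 *   011b = 1024B  100b = 2048B   101b = 4096B
	 * MAX_READ_REQ_SIZE_2048 (0x4000) therefore selects 100b = 2048 bytes.
	 */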


@@ -818,6 +818,12 @@ static void fec_enet_bd_init(struct net_device *dev)
 		for (i = 0; i < txq->bd.ring_size; i++) {
 			/* Initialize the BD for every fragment in the page. */
 			bdp->cbd_sc = cpu_to_fec16(0);
+			if (bdp->cbd_bufaddr &&
+			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+				dma_unmap_single(&fep->pdev->dev,
+						 fec32_to_cpu(bdp->cbd_bufaddr),
+						 fec16_to_cpu(bdp->cbd_datlen),
+						 DMA_TO_DEVICE);
 			if (txq->tx_skbuff[i]) {
 				dev_kfree_skb_any(txq->tx_skbuff[i]);
 				txq->tx_skbuff[i] = NULL;
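
The rule the fec fix enforces: every successful dma_map_single() must be balanced by exactly one dma_unmap_single() with the same handle, size and direction, even for ring entries the hardware never consumed; otherwise the mapping (and any bounce buffer or IOMMU entry behind it) leaks on each down/up cycle. A minimal sketch of the pairing, with placeholder buf/len:

	/* map a streaming TX buffer for device access */
	dma_addr_t handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(&pdev->dev, handle))
		return -ENOMEM;
	/* ... hand to hardware; it may or may not ever be transmitted ... */
	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE); /* always */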


@@ -624,6 +624,7 @@ static int ksz9031_read_status(struct phy_device *phydev)
 		phydev->link = 0;
 		if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
 			phydev->drv->config_intr(phydev);
+		return genphy_config_aneg(phydev);
 	}
 
 	return 0;


@@ -526,6 +526,7 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
 	pl->link_config.pause = MLO_PAUSE_AN;
 	pl->link_config.speed = SPEED_UNKNOWN;
 	pl->link_config.duplex = DUPLEX_UNKNOWN;
+	pl->link_config.an_enabled = true;
 	pl->ops = ops;
 	__set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
 
@@ -951,6 +952,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
 	mutex_lock(&pl->state_mutex);
 	/* Configure the MAC to match the new settings */
 	linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising);
+	pl->link_config.interface = config.interface;
 	pl->link_config.speed = our_kset.base.speed;
 	pl->link_config.duplex = our_kset.base.duplex;
 	pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;


@@ -1514,6 +1514,11 @@ static inline bool sock_owned_by_user(const struct sock *sk)
 	return sk->sk_lock.owned;
 }
 
+static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
+{
+	return sk->sk_lock.owned;
+}
+
 /* no reclassification while locks are held */
 static inline bool sock_allow_reclassification(const struct sock *csk)
 {
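
The two accessors differ only in instrumentation: sock_owned_by_user() also asserts via lockdep that the caller holds the socket lock, which is exactly wrong for bottom-half code that merely wants to peek at the owned flag (the strparser hunk further down is the first _nocheck user). A rough sketch of the intended split, with hypothetical work items:

	/* process context, socket lock held: the checked variant is fine */
	lock_sock(sk);
	if (sock_owned_by_user(sk))
		defer_to_release(sk);		/* hypothetical helper */
	release_sock(sk);

	/* BH context, lock not held: _nocheck avoids a false lockdep
	 * splat while still testing sk->sk_lock.owned
	 */
	if (sock_owned_by_user_nocheck(sk))
		queue_work(my_wq, &my_work);	/* hypothetical wq/work */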


@@ -1570,6 +1570,9 @@ int xfrm_init_state(struct xfrm_state *x);
 int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
 int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
+int xfrm_trans_queue(struct sk_buff *skb,
+		     int (*finish)(struct net *, struct sock *,
+				   struct sk_buff *));
 int xfrm_output_resume(struct sk_buff *skb, int err);
 int xfrm_output(struct sock *sk, struct sk_buff *skb);
 int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);


@@ -25,6 +25,35 @@
 		tcp_state_name(TCP_CLOSING),		\
 		tcp_state_name(TCP_NEW_SYN_RECV))
 
+#define TP_STORE_V4MAPPED(__entry, saddr, daddr)		\
+	do {							\
+		struct in6_addr *pin6;				\
+								\
+		pin6 = (struct in6_addr *)__entry->saddr_v6;	\
+		ipv6_addr_set_v4mapped(saddr, pin6);		\
+		pin6 = (struct in6_addr *)__entry->daddr_v6;	\
+		ipv6_addr_set_v4mapped(daddr, pin6);		\
+	} while (0)
+
+#if IS_ENABLED(CONFIG_IPV6)
+#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6)		\
+	do {								\
+		if (sk->sk_family == AF_INET6) {			\
+			struct in6_addr *pin6;				\
+									\
+			pin6 = (struct in6_addr *)__entry->saddr_v6;	\
+			*pin6 = saddr6;					\
+			pin6 = (struct in6_addr *)__entry->daddr_v6;	\
+			*pin6 = daddr6;					\
+		} else {						\
+			TP_STORE_V4MAPPED(__entry, saddr, daddr);	\
+		}							\
+	} while (0)
+#else
+#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6)	\
+	TP_STORE_V4MAPPED(__entry, saddr, daddr)
+#endif
+
 /*
  * tcp event with arguments sk and skb
  *
@@ -50,7 +79,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
 	TP_fast_assign(
 		struct inet_sock *inet = inet_sk(sk);
-		struct in6_addr *pin6;
 		__be32 *p32;
 
 		__entry->skbaddr = skb;
@@ -65,20 +93,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
 		p32 = (__be32 *) __entry->daddr;
 		*p32 = inet->inet_daddr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-		if (sk->sk_family == AF_INET6) {
-			pin6 = (struct in6_addr *)__entry->saddr_v6;
-			*pin6 = sk->sk_v6_rcv_saddr;
-			pin6 = (struct in6_addr *)__entry->daddr_v6;
-			*pin6 = sk->sk_v6_daddr;
-		} else
-#endif
-		{
-			pin6 = (struct in6_addr *)__entry->saddr_v6;
-			ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
-			pin6 = (struct in6_addr *)__entry->daddr_v6;
-			ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
-		}
+		TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+			       sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
 	),
 
 	TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
@@ -127,7 +143,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
 	TP_fast_assign(
 		struct inet_sock *inet = inet_sk(sk);
-		struct in6_addr *pin6;
 		__be32 *p32;
 
 		__entry->skaddr = sk;
@@ -141,20 +156,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
 		p32 = (__be32 *) __entry->daddr;
 		*p32 = inet->inet_daddr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-		if (sk->sk_family == AF_INET6) {
-			pin6 = (struct in6_addr *)__entry->saddr_v6;
-			*pin6 = sk->sk_v6_rcv_saddr;
-			pin6 = (struct in6_addr *)__entry->daddr_v6;
-			*pin6 = sk->sk_v6_daddr;
-		} else
-#endif
-		{
-			pin6 = (struct in6_addr *)__entry->saddr_v6;
-			ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
-			pin6 = (struct in6_addr *)__entry->daddr_v6;
-			ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
-		}
+		TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+			       sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
 	),
 
 	TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
@@ -197,7 +200,6 @@ TRACE_EVENT(tcp_set_state,
 	TP_fast_assign(
 		struct inet_sock *inet = inet_sk(sk);
-		struct in6_addr *pin6;
 		__be32 *p32;
 
 		__entry->skaddr = sk;
@@ -213,20 +215,8 @@ TRACE_EVENT(tcp_set_state,
 		p32 = (__be32 *) __entry->daddr;
 		*p32 = inet->inet_daddr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-		if (sk->sk_family == AF_INET6) {
-			pin6 = (struct in6_addr *)__entry->saddr_v6;
-			*pin6 = sk->sk_v6_rcv_saddr;
-			pin6 = (struct in6_addr *)__entry->daddr_v6;
-			*pin6 = sk->sk_v6_daddr;
-		} else
-#endif
-		{
-			pin6 = (struct in6_addr *)__entry->saddr_v6;
-			ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
-			pin6 = (struct in6_addr *)__entry->daddr_v6;
-			ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
-		}
+		TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+			       sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
 	),
 
 	TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
@@ -256,7 +246,6 @@ TRACE_EVENT(tcp_retransmit_synack,
 	TP_fast_assign(
 		struct inet_request_sock *ireq = inet_rsk(req);
-		struct in6_addr *pin6;
 		__be32 *p32;
 
 		__entry->skaddr = sk;
@@ -271,20 +260,8 @@ TRACE_EVENT(tcp_retransmit_synack,
 		p32 = (__be32 *) __entry->daddr;
 		*p32 = ireq->ir_rmt_addr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-		if (sk->sk_family == AF_INET6) {
-			pin6 = (struct in6_addr *)__entry->saddr_v6;
-			*pin6 = ireq->ir_v6_loc_addr;
-			pin6 = (struct in6_addr *)__entry->daddr_v6;
-			*pin6 = ireq->ir_v6_rmt_addr;
-		} else
-#endif
-		{
-			pin6 = (struct in6_addr *)__entry->saddr_v6;
-			ipv6_addr_set_v4mapped(ireq->ir_loc_addr, pin6);
-			pin6 = (struct in6_addr *)__entry->daddr_v6;
-			ipv6_addr_set_v4mapped(ireq->ir_rmt_addr, pin6);
-		}
+		TP_STORE_ADDRS(__entry, ireq->ir_loc_addr, ireq->ir_rmt_addr,
+			       ireq->ir_v6_loc_addr, ireq->ir_v6_rmt_addr);
 	),
 
 	TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
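
The reason the #if blocks had to move out of TP_fast_assign() and into standalone TP_STORE_* macros: TP_fast_assign's body travels as an argument of a function-like macro, and the C standard (C11 §6.10.3) makes preprocessor directives inside macro arguments undefined behavior, which some compilers reject outright. A stripped-down illustration, with EMIT standing in for any such macro:

	#define EMIT(block) block

	/* undefined behavior: a directive inside the argument list
	 *	EMIT(
	 *	#if IS_ENABLED(CONFIG_IPV6)
	 *		handle_v6();
	 *	#endif
	 *	)
	 */

	/* well-defined: resolve the conditional first, then expand */
	#if IS_ENABLED(CONFIG_IPV6)
	#define STORE_BODY handle_v6()
	#else
	#define STORE_BODY handle_v4()
	#endif
	EMIT(STORE_BODY);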


@@ -1177,12 +1177,12 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 	int i, new_frags;
 	u32 d_off;
 
-	if (!num_frags)
-		goto release;
-
 	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
 		return -EINVAL;
 
+	if (!num_frags)
+		goto release;
+
 	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	for (i = 0; i < new_frags; i++) {
 		page = alloc_page(gfp_mask);


@@ -23,6 +23,12 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
 	return xfrm4_extract_header(skb);
 }
 
+static int xfrm4_rcv_encap_finish2(struct net *net, struct sock *sk,
+				   struct sk_buff *skb)
+{
+	return dst_input(skb);
+}
+
 static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk,
 					 struct sk_buff *skb)
 {
@@ -33,7 +39,11 @@ static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk,
 					 iph->tos, skb->dev))
 			goto drop;
 	}
-	return dst_input(skb);
+
+	if (xfrm_trans_queue(skb, xfrm4_rcv_encap_finish2))
+		goto drop;
+
+	return 0;
 drop:
 	kfree_skb(skb);
 	return NET_RX_DROP;


@@ -1014,6 +1014,36 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
 	eth_random_addr(dev->perm_addr);
 }
 
+#define GRE6_FEATURES (NETIF_F_SG |		\
+		       NETIF_F_FRAGLIST |	\
+		       NETIF_F_HIGHDMA |	\
+		       NETIF_F_HW_CSUM)
+
+static void ip6gre_tnl_init_features(struct net_device *dev)
+{
+	struct ip6_tnl *nt = netdev_priv(dev);
+
+	dev->features		|= GRE6_FEATURES;
+	dev->hw_features	|= GRE6_FEATURES;
+
+	if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
+		/* TCP offload with GRE SEQ is not supported, nor
+		 * can we support 2 levels of outer headers requiring
+		 * an update.
+		 */
+		if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
+		    nt->encap.type == TUNNEL_ENCAP_NONE) {
+			dev->features    |= NETIF_F_GSO_SOFTWARE;
+			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+		}
+
+		/* Can use a lockless transmit, unless we generate
+		 * output sequences
+		 */
+		dev->features |= NETIF_F_LLTX;
+	}
+}
+
 static int ip6gre_tunnel_init_common(struct net_device *dev)
 {
 	struct ip6_tnl *tunnel;
@@ -1048,6 +1078,8 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
 	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 		dev->mtu -= 8;
 
+	ip6gre_tnl_init_features(dev);
+
 	return 0;
 }
 
@@ -1298,11 +1330,6 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
 	.ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
-#define GRE6_FEATURES (NETIF_F_SG |		\
-		       NETIF_F_FRAGLIST |	\
-		       NETIF_F_HIGHDMA |	\
-		       NETIF_F_HW_CSUM)
-
 static void ip6gre_tap_setup(struct net_device *dev)
 {
 
@@ -1383,26 +1410,6 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
 	nt->net = dev_net(dev);
 	ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
 
-	dev->features		|= GRE6_FEATURES;
-	dev->hw_features	|= GRE6_FEATURES;
-
-	if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
-		/* TCP offload with GRE SEQ is not supported, nor
-		 * can we support 2 levels of outer headers requiring
-		 * an update.
-		 */
-		if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
-		    (nt->encap.type == TUNNEL_ENCAP_NONE)) {
-			dev->features    |= NETIF_F_GSO_SOFTWARE;
-			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
-		}
-
-		/* Can use a lockless transmit, unless we generate
-		 * output sequences
-		 */
-		dev->features |= NETIF_F_LLTX;
-	}
-
 	err = register_netdevice(dev);
 	if (err)
 		goto out;


@@ -32,6 +32,14 @@ int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
 }
 EXPORT_SYMBOL(xfrm6_rcv_spi);
 
+static int xfrm6_transport_finish2(struct net *net, struct sock *sk,
+				   struct sk_buff *skb)
+{
+	if (xfrm_trans_queue(skb, ip6_rcv_finish))
+		__kfree_skb(skb);
+
+	return -1;
+}
+
 int xfrm6_transport_finish(struct sk_buff *skb, int async)
 {
 	struct xfrm_offload *xo = xfrm_offload(skb);
@@ -56,7 +64,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
 
 	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
 		dev_net(skb->dev), NULL, skb, skb->dev, NULL,
-		ip6_rcv_finish);
+		xfrm6_transport_finish2);
 	return -1;
 }


@@ -1009,6 +1009,9 @@ static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
 			continue;
 
 		if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
+			if (cmsg->cmsg_len <
+			    CMSG_LEN(sizeof(struct rds_rdma_args)))
+				return -EINVAL;
 			args = CMSG_DATA(cmsg);
 			*rdma_bytes += args->remote_vec.bytes;
 		}
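
The bug class is generic to control-message parsing: cmsg_len is supplied by userspace, so CMSG_DATA() may point at fewer bytes than the struct it is cast to, and reading a field then walks past the end of the copied control buffer. The safe ordering, sketched with a placeholder struct:

	struct my_args *args;	/* my_args is a stand-in type */

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct my_args)))
		return -EINVAL;		/* payload too short for the cast */
	args = CMSG_DATA(cmsg);		/* only now is reading *args in bounds */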


@@ -379,6 +379,8 @@ void tcf_block_put(struct tcf_block *block)
 {
 	struct tcf_block_ext_info ei = {0, };
 
+	if (!block)
+		return;
 	tcf_block_put_ext(block, block->q, &ei);
 }


@@ -1040,6 +1040,8 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
 
 	if (!tp_head) {
 		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
+		/* Wait for flying RCU callback before it is freed. */
+		rcu_barrier_bh();
 		return;
 	}
 
@@ -1055,7 +1057,7 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
 	rcu_assign_pointer(*miniqp->p_miniq, miniq);
 
 	if (miniq_old)
-		/* This is counterpart of the rcu barrier above. We need to
+		/* This is counterpart of the rcu barriers above. We need to
 		 * block potential new user of miniq_old until all readers
 		 * are not seeing it.
 		 */
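
The distinction this fix depends on: synchronize-style RCU calls wait only for readers, while rcu_barrier_bh() also waits for every already-queued call_rcu_bh() callback to finish running. When such a callback frees an object embedded in a larger structure, only the barrier makes freeing the container safe. A sketch with hypothetical obj/outer:

	call_rcu_bh(&obj->rcu, free_obj);	/* callback may still be in flight */
	/* ... */
	rcu_barrier_bh();	/* returns only after all queued callbacks ran */
	kfree(outer);		/* safe: free_obj() can no longer touch it */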


@@ -4498,7 +4498,7 @@ static int sctp_init_sock(struct sock *sk)
 	SCTP_DBG_OBJCNT_INC(sock);
 
 	local_bh_disable();
-	percpu_counter_inc(&sctp_sockets_allocated);
+	sk_sockets_allocated_inc(sk);
 	sock_prot_inuse_add(net, sk->sk_prot, 1);
 
 	/* Nothing can fail after this block, otherwise
@@ -4542,7 +4542,7 @@ static void sctp_destroy_sock(struct sock *sk)
 	}
 	sctp_endpoint_free(sp->ep);
 	local_bh_disable();
-	percpu_counter_dec(&sctp_sockets_allocated);
+	sk_sockets_allocated_dec(sk);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 	local_bh_enable();
 }


@@ -401,7 +401,7 @@ void strp_data_ready(struct strparser *strp)
 	 * allows a thread in BH context to safely check if the process
 	 * lock is held. In this case, if the lock is held, queue work.
 	 */
-	if (sock_owned_by_user(strp->sk)) {
+	if (sock_owned_by_user_nocheck(strp->sk)) {
 		queue_work(strp_wq, &strp->work);
 		return;
 	}


@@ -324,6 +324,7 @@ restart:
 	if (res) {
 		pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
 			name, -res);
+		kfree(b);
 		return -EINVAL;
 	}
 
@@ -347,8 +348,10 @@ restart:
 	if (skb)
 		tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
 
-	if (tipc_mon_create(net, bearer_id))
+	if (tipc_mon_create(net, bearer_id)) {
+		bearer_disable(net, b);
 		return -ENOMEM;
+	}
 
 	pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
 		name,


@@ -368,18 +368,20 @@ void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
 	u16 prev = grp->bc_snd_nxt - 1;
 	struct tipc_member *m;
 	struct rb_node *n;
+	u16 ackers = 0;
 
 	for (n = rb_first(&grp->members); n; n = rb_next(n)) {
 		m = container_of(n, struct tipc_member, tree_node);
 		if (tipc_group_is_enabled(m)) {
 			tipc_group_update_member(m, len);
 			m->bc_acked = prev;
+			ackers++;
 		}
 	}
 
 	/* Mark number of acknowledges to expect, if any */
 	if (ack)
-		grp->bc_ackers = grp->member_cnt;
+		grp->bc_ackers = ackers;
 	grp->bc_snd_nxt++;
 }
 
@@ -848,17 +850,26 @@ void tipc_group_member_evt(struct tipc_group *grp,
 		*usr_wakeup = true;
 		m->usr_pending = false;
 		node_up = tipc_node_is_up(net, node);
-
-		/* Hold back event if more messages might be expected */
-		if (m->state != MBR_LEAVING && node_up) {
-			m->event_msg = skb;
-			tipc_group_decr_active(grp, m);
-			m->state = MBR_LEAVING;
-		} else {
-			if (node_up)
+		m->event_msg = NULL;
+
+		if (node_up) {
+			/* Hold back event if a LEAVE msg should be expected */
+			if (m->state != MBR_LEAVING) {
+				m->event_msg = skb;
+				tipc_group_decr_active(grp, m);
+				m->state = MBR_LEAVING;
+			} else {
 				msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
-			else
+				__skb_queue_tail(inputq, skb);
+			}
+		} else {
+			if (m->state != MBR_LEAVING) {
+				tipc_group_decr_active(grp, m);
+				m->state = MBR_LEAVING;
 				msg_set_grp_bc_seqno(hdr, m->bc_rcv_nxt);
+			} else {
+				msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
+			}
 			__skb_queue_tail(inputq, skb);
 		}
 		list_del_init(&m->list);


@@ -642,9 +642,13 @@ void tipc_mon_delete(struct net *net, int bearer_id)
 {
 	struct tipc_net *tn = tipc_net(net);
 	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
-	struct tipc_peer *self = get_self(net, bearer_id);
+	struct tipc_peer *self;
 	struct tipc_peer *peer, *tmp;
 
+	if (!mon)
+		return;
+
+	self = get_self(net, bearer_id);
 	write_lock_bh(&mon->lock);
 	tn->monitors[bearer_id] = NULL;
 	list_for_each_entry_safe(peer, tmp, &self->list, list) {


@@ -727,11 +727,11 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
 
 	switch (sk->sk_state) {
 	case TIPC_ESTABLISHED:
+	case TIPC_CONNECTING:
 		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
 			revents |= POLLOUT;
 		/* fall thru' */
 	case TIPC_LISTEN:
-	case TIPC_CONNECTING:
 		if (!skb_queue_empty(&sk->sk_receive_queue))
 			revents |= POLLIN | POLLRDNORM;
 		break;


@@ -8,15 +8,29 @@
  *
  */
 
+#include <linux/bottom_half.h>
+#include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
+#include <linux/percpu.h>
 #include <net/dst.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/ip_tunnels.h>
 #include <net/ip6_tunnel.h>
 
+struct xfrm_trans_tasklet {
+	struct tasklet_struct tasklet;
+	struct sk_buff_head queue;
+};
+
+struct xfrm_trans_cb {
+	int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
+};
+
+#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))
+
 static struct kmem_cache *secpath_cachep __read_mostly;
 
 static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
@@ -25,6 +39,8 @@ static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];
 static struct gro_cells gro_cells;
 static struct net_device xfrm_napi_dev;
 
+static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);
+
 int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
 {
 	int err = 0;
@@ -207,7 +223,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 	xfrm_address_t *daddr;
 	struct xfrm_mode *inner_mode;
 	u32 mark = skb->mark;
-	unsigned int family;
+	unsigned int family = AF_UNSPEC;
 	int decaps = 0;
 	int async = 0;
 	bool xfrm_gro = false;
@@ -216,6 +232,16 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 	if (encap_type < 0) {
 		x = xfrm_input_state(skb);
 
+		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
+			if (x->km.state == XFRM_STATE_ACQ)
+				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
+			else
+				XFRM_INC_STATS(net,
+					       LINUX_MIB_XFRMINSTATEINVALID);
+			goto drop;
+		}
+
 		family = x->outer_mode->afinfo->family;
 
 		/* An encap_type of -1 indicates async resumption. */
@@ -467,9 +493,41 @@ int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
 }
 EXPORT_SYMBOL(xfrm_input_resume);
 
+static void xfrm_trans_reinject(unsigned long data)
+{
+	struct xfrm_trans_tasklet *trans = (void *)data;
+	struct sk_buff_head queue;
+	struct sk_buff *skb;
+
+	__skb_queue_head_init(&queue);
+	skb_queue_splice_init(&trans->queue, &queue);
+
+	while ((skb = __skb_dequeue(&queue)))
+		XFRM_TRANS_SKB_CB(skb)->finish(dev_net(skb->dev), NULL, skb);
+}
+
+int xfrm_trans_queue(struct sk_buff *skb,
+		     int (*finish)(struct net *, struct sock *,
+				   struct sk_buff *))
+{
+	struct xfrm_trans_tasklet *trans;
+
+	trans = this_cpu_ptr(&xfrm_trans_tasklet);
+
+	if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
+		return -ENOBUFS;
+
+	XFRM_TRANS_SKB_CB(skb)->finish = finish;
+	skb_queue_tail(&trans->queue, skb);
+	tasklet_schedule(&trans->tasklet);
+	return 0;
+}
+EXPORT_SYMBOL(xfrm_trans_queue);
+
 void __init xfrm_input_init(void)
 {
 	int err;
+	int i;
 
 	init_dummy_netdev(&xfrm_napi_dev);
 	err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
@@ -480,4 +538,13 @@ void __init xfrm_input_init(void)
 			   sizeof(struct sec_path),
 			   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
 			   NULL);
+
+	for_each_possible_cpu(i) {
+		struct xfrm_trans_tasklet *trans;
+
+		trans = &per_cpu(xfrm_trans_tasklet, i);
+		__skb_queue_head_init(&trans->queue);
+		tasklet_init(&trans->tasklet, xfrm_trans_reinject,
+			     (unsigned long)trans);
+	}
 }


@@ -1168,9 +1168,15 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
  again:
 	pol = rcu_dereference(sk->sk_policy[dir]);
 	if (pol != NULL) {
-		bool match = xfrm_selector_match(&pol->selector, fl, family);
+		bool match;
 		int err = 0;
 
+		if (pol->family != family) {
+			pol = NULL;
+			goto out;
+		}
+
+		match = xfrm_selector_match(&pol->selector, fl, family);
 		if (match) {
 			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
 				pol = NULL;
@@ -1833,6 +1839,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 		    sizeof(struct xfrm_policy *) * num_pols) == 0 &&
 	    xfrm_xdst_can_reuse(xdst, xfrm, err)) {
 		dst_hold(&xdst->u.dst);
+		xfrm_pols_put(pols, num_pols);
 		while (err > 0)
 			xfrm_state_put(xfrm[--err]);
 		return xdst;


@@ -1343,6 +1343,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
 
 	if (orig->aead) {
 		x->aead = xfrm_algo_aead_clone(orig->aead);
+		x->geniv = orig->geniv;
 		if (!x->aead)
 			goto error;
 	}


@@ -1419,11 +1419,14 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
 
 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
 {
+	u16 prev_family;
 	int i;
 
 	if (nr > XFRM_MAX_DEPTH)
 		return -EINVAL;
 
+	prev_family = family;
+
 	for (i = 0; i < nr; i++) {
 		/* We never validated the ut->family value, so many
 		 * applications simply leave it at zero.  The check was
@@ -1435,6 +1438,12 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
 		if (!ut[i].family)
 			ut[i].family = family;
 
+		if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
+		    (ut[i].family != prev_family))
+			return -EINVAL;
+
+		prev_family = ut[i].family;
+
 		switch (ut[i].family) {
 		case AF_INET:
 			break;
@@ -1445,6 +1454,21 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
 		default:
 			return -EINVAL;
 		}
+
+		switch (ut[i].id.proto) {
+		case IPPROTO_AH:
+		case IPPROTO_ESP:
+		case IPPROTO_COMP:
+#if IS_ENABLED(CONFIG_IPV6)
+		case IPPROTO_ROUTING:
+		case IPPROTO_DSTOPTS:
+#endif
+		case IPSEC_PROTO_ANY:
+			break;
+		default:
+			return -EINVAL;
+		}
+
 	}
 
 	return 0;
@@ -2470,7 +2494,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
 	[XFRMA_PROTO]		= { .type = NLA_U8 },
 	[XFRMA_ADDRESS_FILTER]	= { .len = sizeof(struct xfrm_address_filter) },
 	[XFRMA_OFFLOAD_DEV]	= { .len = sizeof(struct xfrm_user_offload) },
-	[XFRMA_OUTPUT_MARK]	= { .len = NLA_U32 },
+	[XFRMA_OUTPUT_MARK]	= { .type = NLA_U32 },
 };
 
 static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
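
The one-word xfrma_policy fix turns on struct nla_policy semantics: .type selects the validator (NLA_U32 requires at least sizeof(u32) bytes of payload), while .len is a minimum-length bound for otherwise-unvalidated attributes. ".len = NLA_U32" therefore compiled fine but only enforced a minimum length equal to the enum's small numeric value. A sketch of the two intended usages, with hypothetical attribute names:

	static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
		/* validated as an integer attribute of at least u32 size */
		[MY_ATTR_MARK]   = { .type = NLA_U32 },
		/* unstructured payload that must be at least this long */
		[MY_ATTR_FILTER] = { .len = sizeof(struct my_filter) },
	};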


@@ -523,21 +523,23 @@ static int do_show(int argc, char **argv)
 				break;
 			p_err("can't get next map: %s%s", strerror(errno),
 			      errno == EINVAL ? " -- kernel too old?" : "");
-			return -1;
+			break;
 		}
 
 		fd = bpf_map_get_fd_by_id(id);
 		if (fd < 0) {
+			if (errno == ENOENT)
+				continue;
 			p_err("can't get map by id (%u): %s",
 			      id, strerror(errno));
-			return -1;
+			break;
 		}
 
 		err = bpf_obj_get_info_by_fd(fd, &info, &len);
 		if (err) {
 			p_err("can't get map info: %s", strerror(errno));
 			close(fd);
-			return -1;
+			break;
 		}
 
 		if (json_output)


@@ -382,6 +382,8 @@ static int do_show(int argc, char **argv)
 
 		fd = bpf_prog_get_fd_by_id(id);
 		if (fd < 0) {
+			if (errno == ENOENT)
+				continue;
 			p_err("can't get prog by id (%u): %s",
 			      id, strerror(errno));
 			err = -1;


@@ -39,7 +39,7 @@ $(BPFOBJ): force
 CLANG ?= clang
 LLC   ?= llc
 
-PROBE := $(shell llc -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
+PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
 
 # Let newer LLVM versions transparently probe the kernel for availability
 # of full BPF instruction set.