Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Various TCP control block fixes, including one for a crash seen with
    SELinux, from David Ahern and Eric Dumazet.

 2) Fix ACK generation in rxrpc, from David Howells.

 3) ipvlan doesn't set the skb mark in the ipv4 route lookup key, from
    Gao Feng (see the sketch after this list).

 4) SIT tunnel configuration doesn't apply the ipv4 frag_off field
    properly, fix from Hangbin Liu.

 5) TSO can fail after device down/up on stmmac, fix from Lars Persson.

 6) Various bpftool fixes (mostly in JSON handling) from Quentin Monnet.

 7) Various SKB leak fixes in vhost/tun/tap (mostly observed as
    performance problems), from Wei Xu (a sketch of the fix pattern
    follows the commit list below).

 8) mvpp2's TX descriptors were not zero initialized, from Yan Markman.

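For item 3, the fix amounts to carrying the skb mark in the IPv4 flow key
used for the route lookup, so that mark-based policy routing applies to
ipvlan egress traffic. Below is a minimal sketch of that pattern; the
helper name is hypothetical, and the real change (in the ipvlan hunk
further down) simply adds .flowi4_mark to an existing flowi4 initializer.

    /* Sketch only: the flow-key fields relevant to the ipvlan fix.
     * demo_lookup_v4_route() is a hypothetical helper, not driver code.
     */
    #include <linux/ip.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <net/route.h>

    static struct rtable *demo_lookup_v4_route(struct net *net,
                                               struct net_device *dev,
                                               struct sk_buff *skb)
    {
            const struct iphdr *ip4h = ip_hdr(skb);
            struct flowi4 fl4 = {
                    .flowi4_oif   = dev->ifindex,
                    .flowi4_tos   = RT_TOS(ip4h->tos),
                    .flowi4_flags = FLOWI_FLAG_ANYSRC,
                    .flowi4_mark  = skb->mark, /* the previously missing field */
                    .daddr        = ip4h->daddr,
                    .saddr        = ip4h->saddr,
            };

            return ip_route_output_flow(net, &fl4, NULL);
    }
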
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (57 commits)
  tcp: use IPCB instead of TCP_SKB_CB in inet_exact_dif_match()
  tcp: add tcp_v4_fill_cb()/tcp_v4_restore_cb()
  rxrpc: Fix the MAINTAINERS record
  rxrpc: Use correct netns source in rxrpc_release_sock()
  liquidio: fix incorrect indentation of assignment statement
  stmmac: reset last TSO segment size after device open
  ipvlan: Add the skb->mark as flow4's member to lookup route
  s390/qeth: build max size GSO skbs on L2 devices
  s390/qeth: fix GSO throughput regression
  s390/qeth: fix thinko in IPv4 multicast address tracking
  tap: free skb if flags error
  tun: free skb in early errors
  vhost: fix skb leak in handle_rx()
  bnxt_en: Fix a variable scoping in bnxt_hwrm_do_send_msg()
  bnxt_en: fix dst/src fid for vxlan encap/decap actions
  bnxt_en: wildcard smac while creating tunnel decap filter
  bnxt_en: Need to unconditionally shut down RoCE in bnxt_shutdown
  phylink: ensure we take the link down when phylink_stop() is called
  sfp: warn about modules requiring address change sequence
  sfp: improve RX_LOS handling
  ...
Linus Torvalds 2017-12-04 11:14:46 -08:00
commit 236fa078c6
54 changed files with 398 additions and 188 deletions
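
For item 7 (the vhost/tun/tap SKB leaks), the recurring pattern in the
fixes below is that a recvmsg-style handler which is handed an skb via
msg_control must free it on every early error return, not only consume it
on the success path. A minimal sketch with hypothetical names, mirroring
the tap/tun hunks further down:

    /* Sketch only: demo_recvmsg() is hypothetical; it illustrates the
     * "free the msg_control skb on early errors" pattern from the
     * tap/tun fixes in this merge.
     */
    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <linux/socket.h>

    static int demo_recvmsg(struct msghdr *m, int flags)
    {
            struct sk_buff *skb = m->msg_control;

            if (flags & ~(MSG_DONTWAIT | MSG_TRUNC)) {
                    /* Early error: we own the skb handed in via
                     * msg_control, so drop it instead of leaking it.
                     */
                    if (skb)
                            kfree_skb(skb);
                    return -EINVAL;
            }

            /* ... the normal receive path consumes or frees the skb ... */
            return 0;
    }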


@ -554,13 +554,13 @@ S: Orphan
F: Documentation/filesystems/affs.txt
F: fs/affs/
AFS FILESYSTEM & AF_RXRPC SOCKET DOMAIN
AFS FILESYSTEM
M: David Howells <dhowells@redhat.com>
L: linux-afs@lists.infradead.org
S: Supported
F: fs/afs/
F: include/net/af_rxrpc.h
F: net/rxrpc/af_rxrpc.c
F: include/trace/events/afs.h
F: Documentation/filesystems/afs.txt
W: https://www.infradead.org/~dhowells/kafs/
AGPGART DRIVER
@ -11776,6 +11776,18 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-deve
S: Maintained
F: drivers/net/wireless/realtek/rtl8xxxu/
RXRPC SOCKETS (AF_RXRPC)
M: David Howells <dhowells@redhat.com>
L: linux-afs@lists.infradead.org
S: Supported
F: net/rxrpc/
F: include/keys/rxrpc-type.h
F: include/net/af_rxrpc.h
F: include/trace/events/rxrpc.h
F: include/uapi/linux/rxrpc.h
F: Documentation/networking/rxrpc.txt
W: https://www.infradead.org/~dhowells/kafs/
S3 SAVAGE FRAMEBUFFER DRIVER
M: Antonino Daplas <adaplas@gmail.com>
L: linux-fbdev@vger.kernel.org


@ -184,12 +184,12 @@
* Below is some version info we got:
* SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
* Filter? connected? Passive detection ception in MB
* MX25 FlexCAN2 03.00.00.00 no no ? no no
* MX25 FlexCAN2 03.00.00.00 no no no no no
* MX28 FlexCAN2 03.00.04.00 yes yes no no no
* MX35 FlexCAN2 03.00.00.00 no no ? no no
* MX35 FlexCAN2 03.00.00.00 no no no no no
* MX53 FlexCAN2 03.00.00.00 yes no no no no
* MX6s FlexCAN3 10.00.12.00 yes yes no no yes
* VF610 FlexCAN3 ? no yes ? yes yes?
* VF610 FlexCAN3 ? no yes no yes yes?
*
* Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
*/
@ -297,7 +297,8 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
FLEXCAN_QUIRK_BROKEN_PERR_STATE,
};
static const struct can_bittiming_const flexcan_bittiming_const = {


@ -825,7 +825,10 @@ err_release_regions:
err_disable_pci:
pci_disable_device(pdev);
return err;
/* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
* the probe() function must return a negative errno in case of failure
* (err is unchanged if negative) */
return pcibios_err_to_errno(err);
}
/* free the board structure object, as well as its resources: */


@ -717,7 +717,10 @@ failure_release_regions:
failure_disable_pci:
pci_disable_device(pdev);
return err;
/* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
* the probe() function must return a negative errno in case of failure
* (err is unchanged if negative) */
return pcibios_err_to_errno(err);
}
static void peak_pci_remove(struct pci_dev *pdev)


@ -637,6 +637,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
mbx_mask = hecc_read(priv, HECC_CANMIM);
mbx_mask |= HECC_TX_MBOX_MASK;
hecc_write(priv, HECC_CANMIM, mbx_mask);
} else {
/* repoll is done only if whole budget is used */
num_pkts = quota;
}
return num_pkts;


@ -609,8 +609,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
}
if (pos + tmp->len > actual_len) {
dev_err(dev->udev->dev.parent,
"Format error\n");
dev_err_ratelimited(dev->udev->dev.parent,
"Format error\n");
break;
}
@ -813,6 +813,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
if (err) {
netdev_err(netdev, "Error transmitting URB\n");
usb_unanchor_urb(urb);
kfree(buf);
usb_free_urb(urb);
return err;
}
@ -1333,7 +1334,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
while (pos <= urb->actual_length - MSG_HEADER_LEN) {
while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
msg = urb->transfer_buffer + pos;
/* The Kvaser firmware can only read and write messages that
@ -1352,7 +1353,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
}
if (pos + msg->len > urb->actual_length) {
dev_err(dev->udev->dev.parent, "Format error\n");
dev_err_ratelimited(dev->udev->dev.parent,
"Format error\n");
break;
}
@ -1768,6 +1770,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
usb_unanchor_urb(urb);
kfree(buf);
stats->tx_dropped++;


@ -592,6 +592,7 @@ static void mcba_usb_read_bulk_callback(struct urb *urb)
break;
case -ENOENT:
case -EPIPE:
case -ESHUTDOWN:
return;
@ -862,7 +863,7 @@ static int mcba_usb_probe(struct usb_interface *intf,
goto cleanup_unregister_candev;
}
dev_info(&intf->dev, "Microchip CAN BUS analizer connected\n");
dev_info(&intf->dev, "Microchip CAN BUS Analyzer connected\n");
return 0;


@ -625,7 +625,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
slice_num, false);
bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
slice_num, true);
SLICE_NUM_MASK, true);
/* Insert into TCAM now because we need to insert a second rule */
bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
@ -699,7 +699,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
/* Insert into Action and policer RAMs now, set chain ID to
* the one we are chained to
*/
ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
queue_num, true);
if (ret)
goto out_err;


@ -3368,6 +3368,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
u16 cp_ring_id, len = 0;
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
struct hwrm_short_input short_input = {0};
req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
memset(resp, 0, PAGE_SIZE);
@ -3376,7 +3377,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
if (bp->flags & BNXT_FLAG_SHORT_CMD) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
struct hwrm_short_input short_input = {0};
memcpy(short_cmd_req, req, msg_len);
memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
@ -8263,8 +8263,9 @@ static void bnxt_shutdown(struct pci_dev *pdev)
if (netif_running(dev))
dev_close(dev);
bnxt_ulp_shutdown(bp);
if (system_state == SYSTEM_POWER_OFF) {
bnxt_ulp_shutdown(bp);
bnxt_clear_int_mode(bp);
pci_wake_from_d3(pdev, bp->wol);
pci_set_power_state(pdev, PCI_D3hot);


@ -56,7 +56,6 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
{
int ifindex = tcf_mirred_ifindex(tc_act);
struct net_device *dev;
u16 dst_fid;
dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
if (!dev) {
@ -64,15 +63,7 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
return -EINVAL;
}
/* find the FID from dev */
dst_fid = bnxt_flow_get_dst_fid(bp, dev);
if (dst_fid == BNXT_FID_INVALID) {
netdev_info(bp->dev, "can't get fid for ifindex=%d", ifindex);
return -EINVAL;
}
actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
actions->dst_fid = dst_fid;
actions->dst_dev = dev;
return 0;
}
@ -160,13 +151,17 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
if (rc)
return rc;
/* Tunnel encap/decap action must be accompanied by a redirect action */
if ((actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP ||
actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) &&
!(actions->flags & BNXT_TC_ACTION_FLAG_FWD)) {
netdev_info(bp->dev,
"error: no redir action along with encap/decap");
return -EINVAL;
if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
/* dst_fid is PF's fid */
actions->dst_fid = bp->pf.fw_fid;
} else {
/* find the FID from dst_dev */
actions->dst_fid =
bnxt_flow_get_dst_fid(bp, actions->dst_dev);
if (actions->dst_fid == BNXT_FID_INVALID)
return -EINVAL;
}
}
return rc;
@ -532,10 +527,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
}
if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR |
CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR;
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
ether_addr_copy(req.dst_macaddr, l2_info->dmac);
ether_addr_copy(req.src_macaddr, l2_info->smac);
}
if (l2_info->num_vlans) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
@ -901,10 +894,10 @@ static void bnxt_tc_put_decap_handle(struct bnxt *bp,
static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
struct ip_tunnel_key *tun_key,
struct bnxt_tc_l2_key *l2_info,
struct net_device *real_dst_dev)
struct bnxt_tc_l2_key *l2_info)
{
#ifdef CONFIG_INET
struct net_device *real_dst_dev = bp->dev;
struct flowi4 flow = { {0} };
struct net_device *dst_dev;
struct neighbour *nbr;
@ -1008,14 +1001,13 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
*/
tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
tun_key.tp_dst = flow->tun_key.tp_dst;
rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info, bp->dev);
rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
if (rc)
goto put_decap;
decap_key->ttl = tun_key.ttl;
decap_l2_info = &decap_node->l2_info;
/* decap smac is wildcarded */
ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
ether_addr_copy(decap_l2_info->smac, l2_info.dmac);
if (l2_info.num_vlans) {
decap_l2_info->num_vlans = l2_info.num_vlans;
decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
@ -1095,8 +1087,7 @@ static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
goto done;
rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info,
flow->actions.dst_dev);
rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
if (rc)
goto put_encap;
@ -1169,6 +1160,15 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
return 0;
}
static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
u16 src_fid)
{
if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
flow->src_fid = bp->pf.fw_fid;
else
flow->src_fid = src_fid;
}
/* Add a new flow or replace an existing flow.
* Notes on locking:
* There are essentially two critical sections here.
@ -1204,7 +1204,8 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
if (rc)
goto free_node;
flow->src_fid = src_fid;
bnxt_tc_set_src_fid(bp, flow, src_fid);
if (!bnxt_tc_can_offload(bp, flow)) {
rc = -ENOSPC;


@ -1113,7 +1113,7 @@ static int liquidio_watchdog(void *param)
dev_err(&oct->pci_dev->dev,
"ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
core);
err_msg_was_printed[core] = true;
err_msg_was_printed[core] = true;
}
}


@ -5598,7 +5598,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
u32 txq_dma;
/* Allocate memory for TX descriptors */
aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
&aggr_txq->descs_dma, GFP_KERNEL);
if (!aggr_txq->descs)


@ -808,8 +808,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
val, reg);
if (gmac->variant->soc_has_internal_phy) {
if (of_property_read_bool(priv->plat->phy_node,
"allwinner,leds-active-low"))
if (of_property_read_bool(node, "allwinner,leds-active-low"))
reg |= H3_EPHY_LED_POL;
else
reg &= ~H3_EPHY_LED_POL;


@ -2588,6 +2588,7 @@ static int stmmac_open(struct net_device *dev)
priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
priv->mss = 0;
ret = alloc_dma_desc_resources(priv);
if (ret < 0) {


@ -393,6 +393,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
.flowi4_oif = dev->ifindex,
.flowi4_tos = RT_TOS(ip4h->tos),
.flowi4_flags = FLOWI_FLAG_ANYSRC,
.flowi4_mark = skb->mark,
.daddr = ip4h->daddr,
.saddr = ip4h->saddr,
};


@ -496,16 +496,18 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
}
/* Center KSZ9031RNX FLP timing at 16ms. */
static int ksz9031_center_flp_timing(struct phy_device *phydev)
{
int result;
/* Center KSZ9031RNX FLP timing at 16ms. */
result = ksz9031_extended_write(phydev, OP_DATA, 0,
MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006);
if (result)
return result;
result = ksz9031_extended_write(phydev, OP_DATA, 0,
MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80);
if (result)
return result;


@ -773,6 +773,7 @@ void phylink_stop(struct phylink *pl)
sfp_upstream_stop(pl->sfp_bus);
set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
queue_work(system_power_efficient_wq, &pl->resolve);
flush_work(&pl->resolve);
}
EXPORT_SYMBOL_GPL(phylink_stop);


@ -351,12 +351,13 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
{
unsigned int los = sfp->state & SFP_F_LOS;
/* FIXME: what if neither SFP_OPTIONS_LOS_INVERTED nor
* SFP_OPTIONS_LOS_NORMAL are set? For now, we assume
* the same as SFP_OPTIONS_LOS_NORMAL set.
/* If neither SFP_OPTIONS_LOS_INVERTED nor SFP_OPTIONS_LOS_NORMAL
* are set, we assume that no LOS signal is available.
*/
if (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED)
if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED))
los ^= SFP_F_LOS;
else if (!(sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL)))
los = 0;
if (los)
sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
@ -364,6 +365,22 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
sfp_sm_link_up(sfp);
}
static bool sfp_los_event_active(struct sfp *sfp, unsigned int event)
{
return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
event == SFP_E_LOS_LOW) ||
(sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
event == SFP_E_LOS_HIGH);
}
static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
{
return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
event == SFP_E_LOS_HIGH) ||
(sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
event == SFP_E_LOS_LOW);
}
static void sfp_sm_fault(struct sfp *sfp, bool warn)
{
if (sfp->sm_retries && !--sfp->sm_retries) {
@ -470,6 +487,11 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
return -EINVAL;
}
/* If the module requires address swap mode, warn about it */
if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)
dev_warn(sfp->dev,
"module address swap to access page 0xA2 is not supported.\n");
return sfp_module_insert(sfp->sfp_bus, &sfp->id);
}
@ -581,9 +603,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
case SFP_S_WAIT_LOS:
if (event == SFP_E_TX_FAULT)
sfp_sm_fault(sfp, true);
else if (event ==
(sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
SFP_E_LOS_HIGH : SFP_E_LOS_LOW))
else if (sfp_los_event_inactive(sfp, event))
sfp_sm_link_up(sfp);
break;
@ -591,9 +611,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
if (event == SFP_E_TX_FAULT) {
sfp_sm_link_down(sfp);
sfp_sm_fault(sfp, true);
} else if (event ==
(sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
SFP_E_LOS_LOW : SFP_E_LOS_HIGH)) {
} else if (sfp_los_event_active(sfp, event)) {
sfp_sm_link_down(sfp);
sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
}
@ -639,7 +657,8 @@ static int sfp_module_info(struct sfp *sfp, struct ethtool_modinfo *modinfo)
{
/* locking... and check module is present */
if (sfp->id.ext.sff8472_compliance) {
if (sfp->id.ext.sff8472_compliance &&
!(sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)) {
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
} else {


@ -829,8 +829,11 @@ static ssize_t tap_do_read(struct tap_queue *q,
DEFINE_WAIT(wait);
ssize_t ret = 0;
if (!iov_iter_count(to))
if (!iov_iter_count(to)) {
if (skb)
kfree_skb(skb);
return 0;
}
if (skb)
goto put;
@ -1154,11 +1157,14 @@ static int tap_recvmsg(struct socket *sock, struct msghdr *m,
size_t total_len, int flags)
{
struct tap_queue *q = container_of(sock, struct tap_queue, sock);
struct sk_buff *skb = m->msg_control;
int ret;
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
if (skb)
kfree_skb(skb);
return -EINVAL;
ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT,
m->msg_control);
}
ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;


@ -1952,8 +1952,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
tun_debug(KERN_INFO, tun, "tun_do_read\n");
if (!iov_iter_count(to))
if (!iov_iter_count(to)) {
if (skb)
kfree_skb(skb);
return 0;
}
if (!skb) {
/* Read frames from ring */
@ -2069,22 +2072,24 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
{
struct tun_file *tfile = container_of(sock, struct tun_file, socket);
struct tun_struct *tun = tun_get(tfile);
struct sk_buff *skb = m->msg_control;
int ret;
if (!tun)
return -EBADFD;
if (!tun) {
ret = -EBADFD;
goto out_free_skb;
}
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
ret = -EINVAL;
goto out;
goto out_put_tun;
}
if (flags & MSG_ERRQUEUE) {
ret = sock_recv_errqueue(sock->sk, m, total_len,
SOL_PACKET, TUN_TX_TIMESTAMP);
goto out;
}
ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT,
m->msg_control);
ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb);
if (ret > (ssize_t)total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
@ -2092,6 +2097,13 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
out:
tun_put(tun);
return ret;
out_put_tun:
tun_put(tun);
out_free_skb:
if (skb)
kfree_skb(skb);
return ret;
}
static int tun_peek_len(struct socket *sock)


@ -987,6 +987,9 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
int qeth_set_features(struct net_device *, netdev_features_t);
void qeth_recover_features(struct net_device *dev);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
int qeth_vm_request_mac(struct qeth_card *card);
int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len);


@ -20,6 +20,11 @@
#include <linux/mii.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/skbuff.h>
#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
@ -6439,6 +6444,32 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
}
EXPORT_SYMBOL_GPL(qeth_fix_features);
netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
/* GSO segmentation builds skbs with
* a (small) linear part for the headers, and
* page frags for the data.
* Compared to a linear skb, the header-only part consumes an
* additional buffer element. This reduces buffer utilization, and
* hurts throughput. So compress small segments into one element.
*/
if (netif_needs_gso(skb, features)) {
/* match skb_segment(): */
unsigned int doffset = skb->data - skb_mac_header(skb);
unsigned int hsize = skb_shinfo(skb)->gso_size;
unsigned int hroom = skb_headroom(skb);
/* linearize only if resulting skb allocations are order-0: */
if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
features &= ~NETIF_F_SG;
}
return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);
static int __init qeth_core_init(void)
{
int rc;


@ -961,6 +961,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_stop = qeth_l2_stop,
.ndo_get_stats = qeth_get_stats,
.ndo_start_xmit = qeth_l2_hard_start_xmit,
.ndo_features_check = qeth_features_check,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l2_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
@ -1011,6 +1012,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
card->dev->hw_features = NETIF_F_SG;
card->dev->vlan_features = NETIF_F_SG;
card->dev->features |= NETIF_F_SG;
/* OSA 3S and earlier has no RX/TX support */
if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
card->dev->hw_features |= NETIF_F_IP_CSUM;
@ -1029,8 +1031,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->info.broadcast_capable = 1;
qeth_l2_request_initial_mac(card);
card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
PAGE_SIZE;
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
netif_carrier_off(card->dev);


@ -1377,6 +1377,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
memcpy(tmp->mac, buf, sizeof(tmp->mac));
tmp->is_multicast = 1;
ipm = qeth_l3_ip_from_hash(card, tmp);
if (ipm) {
@ -2918,6 +2919,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_stop = qeth_l3_stop,
.ndo_get_stats = qeth_get_stats,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_features_check = qeth_features_check,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_multicast_list,
.ndo_do_ioctl = qeth_do_ioctl,
@ -2958,6 +2960,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->vlan_features = NETIF_F_SG |
NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_TSO;
card->dev->features |= NETIF_F_SG;
}
}
} else if (card->info.type == QETH_CARD_TYPE_IQD) {
@ -2985,8 +2988,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
netif_keep_dst(card->dev);
card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
PAGE_SIZE;
netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
PAGE_SIZE);
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);


@ -778,16 +778,6 @@ static void handle_rx(struct vhost_net *net)
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
goto out;
if (nvq->rx_array)
msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
/* On overrun, truncate and discard */
if (unlikely(headcount > UIO_MAXIOV)) {
iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
err = sock->ops->recvmsg(sock, &msg,
1, MSG_DONTWAIT | MSG_TRUNC);
pr_debug("Discarded rx packet: len %zd\n", sock_len);
continue;
}
/* OK, now we need to know about added descriptors. */
if (!headcount) {
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
@ -800,6 +790,16 @@ static void handle_rx(struct vhost_net *net)
* they refilled. */
goto out;
}
if (nvq->rx_array)
msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
/* On overrun, truncate and discard */
if (unlikely(headcount > UIO_MAXIOV)) {
iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
err = sock->ops->recvmsg(sock, &msg,
1, MSG_DONTWAIT | MSG_TRUNC);
pr_debug("Discarded rx packet: len %zd\n", sock_len);
continue;
}
/* We don't need to be notified again. */
iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
fixup = msg.msg_iter;


@ -1406,8 +1406,7 @@ static inline struct sk_buff *skb_get(struct sk_buff *skb)
}
/*
* If users == 1, we are the only owner and are can avoid redundant
* atomic change.
* If users == 1, we are the only owner and can avoid redundant atomic changes.
*/
/**


@ -503,7 +503,8 @@ struct sctp_datamsg {
/* Did the messenge fail to send? */
int send_error;
u8 send_failed:1,
can_delay; /* should this message be Nagle delayed */
can_delay:1, /* should this message be Nagle delayed */
abandoned:1; /* should this message be abandoned */
};
struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,


@ -14,7 +14,6 @@ struct tcf_sample {
struct psample_group __rcu *psample_group;
u32 psample_group_num;
struct list_head tcfm_list;
struct rcu_head rcu;
};
#define to_sample(a) ((struct tcf_sample *)a)


@ -844,12 +844,11 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb)
}
#endif
/* TCP_SKB_CB reference means this can not be used from early demux */
static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
return true;
#endif
return false;


@ -8,6 +8,7 @@
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/tracepoint.h>
#include <linux/bpf.h>
#define __XDP_ACT_MAP(FN) \
FN(ABORTED) \


@ -1447,7 +1447,8 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
rcu_read_lock();
prog = rcu_dereference(progs)->progs;
for (; *prog; prog++)
cnt++;
if (*prog != &dummy_bpf_prog.prog)
cnt++;
rcu_read_unlock();
return cnt;
}


@ -1,3 +1,18 @@
/*
* Copyright (C) 2017 Netronome Systems, Inc.
*
* This software is licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree.
*
* THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
* WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
* OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
* THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
*/
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>


@ -759,6 +759,8 @@ const struct bpf_prog_ops perf_event_prog_ops = {
static DEFINE_MUTEX(bpf_event_mutex);
#define BPF_TRACE_MAX_PROGS 64
int perf_event_attach_bpf_prog(struct perf_event *event,
struct bpf_prog *prog)
{
@ -772,6 +774,12 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
goto unlock;
old_array = event->tp_event->prog_array;
if (old_array &&
bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
ret = -E2BIG;
goto unlock;
}
ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
if (ret < 0)
goto unlock;


@ -57,10 +57,16 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
if (state == DCCP_TIME_WAIT)
timeo = DCCP_TIMEWAIT_LEN;
/* tw_timer is pinned, so we need to make sure BH are disabled
* in following section, otherwise timer handler could run before
* we complete the initialization.
*/
local_bh_disable();
inet_twsk_schedule(tw, timeo);
/* Linkage updates. */
__inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
inet_twsk_put(tw);
local_bh_enable();
} else {
/* Sorry, if we're out of memory, just CLOSE this
* socket up. We've got bigger problems than


@ -1591,6 +1591,34 @@ int tcp_filter(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(tcp_filter);
static void tcp_v4_restore_cb(struct sk_buff *skb)
{
memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
sizeof(struct inet_skb_parm));
}
static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
const struct tcphdr *th)
{
/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
* barrier() makes sure compiler wont play fool^Waliasing games.
*/
memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
sizeof(struct inet_skb_parm));
barrier();
TCP_SKB_CB(skb)->seq = ntohl(th->seq);
TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
skb->len - th->doff * 4);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
TCP_SKB_CB(skb)->tcp_tw_isn = 0;
TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
TCP_SKB_CB(skb)->sacked = 0;
TCP_SKB_CB(skb)->has_rxtstamp =
skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}
/*
* From tcp_input.c
*/
@ -1631,24 +1659,6 @@ int tcp_v4_rcv(struct sk_buff *skb)
th = (const struct tcphdr *)skb->data;
iph = ip_hdr(skb);
/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
* barrier() makes sure compiler wont play fool^Waliasing games.
*/
memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
sizeof(struct inet_skb_parm));
barrier();
TCP_SKB_CB(skb)->seq = ntohl(th->seq);
TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
skb->len - th->doff * 4);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
TCP_SKB_CB(skb)->tcp_tw_isn = 0;
TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
TCP_SKB_CB(skb)->sacked = 0;
TCP_SKB_CB(skb)->has_rxtstamp =
skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
lookup:
sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
th->dest, sdif, &refcounted);
@ -1679,14 +1689,19 @@ process:
sock_hold(sk);
refcounted = true;
nsk = NULL;
if (!tcp_filter(sk, skb))
if (!tcp_filter(sk, skb)) {
th = (const struct tcphdr *)skb->data;
iph = ip_hdr(skb);
tcp_v4_fill_cb(skb, iph, th);
nsk = tcp_check_req(sk, skb, req, false);
}
if (!nsk) {
reqsk_put(req);
goto discard_and_relse;
}
if (nsk == sk) {
reqsk_put(req);
tcp_v4_restore_cb(skb);
} else if (tcp_child_process(sk, nsk, skb)) {
tcp_v4_send_reset(nsk, skb);
goto discard_and_relse;
@ -1712,6 +1727,7 @@ process:
goto discard_and_relse;
th = (const struct tcphdr *)skb->data;
iph = ip_hdr(skb);
tcp_v4_fill_cb(skb, iph, th);
skb->dev = NULL;
@ -1742,6 +1758,8 @@ no_tcp_socket:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;
tcp_v4_fill_cb(skb, iph, th);
if (tcp_checksum_complete(skb)) {
csum_error:
__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
@ -1768,6 +1786,8 @@ do_time_wait:
goto discard_it;
}
tcp_v4_fill_cb(skb, iph, th);
if (tcp_checksum_complete(skb)) {
inet_twsk_put(inet_twsk(sk));
goto csum_error;
@ -1784,6 +1804,7 @@ do_time_wait:
if (sk2) {
inet_twsk_deschedule_put(inet_twsk(sk));
sk = sk2;
tcp_v4_restore_cb(skb);
refcounted = false;
goto process;
}


@ -310,10 +310,16 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
if (state == TCP_TIME_WAIT)
timeo = TCP_TIMEWAIT_LEN;
/* tw_timer is pinned, so we need to make sure BH are disabled
* in following section, otherwise timer handler could run before
* we complete the initialization.
*/
local_bh_disable();
inet_twsk_schedule(tw, timeo);
/* Linkage updates. */
__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
inet_twsk_put(tw);
local_bh_enable();
} else {
/* Sorry, if we're out of memory, just CLOSE this
* socket up. We've got bigger problems than


@ -1098,6 +1098,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
ipip6_tunnel_link(sitn, t);
t->parms.iph.ttl = p->iph.ttl;
t->parms.iph.tos = p->iph.tos;
t->parms.iph.frag_off = p->iph.frag_off;
if (t->parms.link != p->link || t->fwmark != fwmark) {
t->parms.link = p->link;
t->fwmark = fwmark;


@ -1454,7 +1454,6 @@ process:
struct sock *nsk;
sk = req->rsk_listener;
tcp_v6_fill_cb(skb, hdr, th);
if (tcp_v6_inbound_md5_hash(sk, skb)) {
sk_drops_add(sk, skb);
reqsk_put(req);
@ -1467,8 +1466,12 @@ process:
sock_hold(sk);
refcounted = true;
nsk = NULL;
if (!tcp_filter(sk, skb))
if (!tcp_filter(sk, skb)) {
th = (const struct tcphdr *)skb->data;
hdr = ipv6_hdr(skb);
tcp_v6_fill_cb(skb, hdr, th);
nsk = tcp_check_req(sk, skb, req, false);
}
if (!nsk) {
reqsk_put(req);
goto discard_and_relse;
@ -1492,8 +1495,6 @@ process:
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
tcp_v6_fill_cb(skb, hdr, th);
if (tcp_v6_inbound_md5_hash(sk, skb))
goto discard_and_relse;
@ -1501,6 +1502,7 @@ process:
goto discard_and_relse;
th = (const struct tcphdr *)skb->data;
hdr = ipv6_hdr(skb);
tcp_v6_fill_cb(skb, hdr, th);
skb->dev = NULL;
@ -1590,7 +1592,6 @@ do_time_wait:
tcp_v6_timewait_ack(sk, skb);
break;
case TCP_TW_RST:
tcp_v6_restore_cb(skb);
tcp_v6_send_reset(sk, skb);
inet_twsk_deschedule_put(inet_twsk(sk));
goto discard_it;


@ -860,6 +860,7 @@ static void rxrpc_sock_destructor(struct sock *sk)
static int rxrpc_release_sock(struct sock *sk)
{
struct rxrpc_sock *rx = rxrpc_sk(sk);
struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
_enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
@ -895,8 +896,8 @@ static int rxrpc_release_sock(struct sock *sk)
rxrpc_release_calls_on_socket(rx);
flush_workqueue(rxrpc_workqueue);
rxrpc_purge_queue(&sk->sk_receive_queue);
rxrpc_queue_work(&rx->local->rxnet->service_conn_reaper);
rxrpc_queue_work(&rx->local->rxnet->client_conn_reaper);
rxrpc_queue_work(&rxnet->service_conn_reaper);
rxrpc_queue_work(&rxnet->client_conn_reaper);
rxrpc_put_local(rx->local);
rx->local = NULL;


@ -123,7 +123,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
else
ack_at = expiry;
ack_at = jiffies + expiry;
ack_at += now;
if (time_before(ack_at, call->ack_at)) {
WRITE_ONCE(call->ack_at, ack_at);
rxrpc_reduce_call_timer(call, ack_at, now,
@ -426,7 +426,7 @@ recheck_state:
next = call->expect_rx_by;
#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
set(call->expect_req_by);
set(call->expect_term_by);
set(call->ack_at);


@ -30,22 +30,18 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
struct rxrpc_channel *chan;
struct msghdr msg;
struct kvec iov;
struct kvec iov[3];
struct {
struct rxrpc_wire_header whdr;
union {
struct {
__be32 code;
} abort;
struct {
struct rxrpc_ackpacket ack;
u8 padding[3];
struct rxrpc_ackinfo info;
};
__be32 abort_code;
struct rxrpc_ackpacket ack;
};
} __attribute__((packed)) pkt;
struct rxrpc_ackinfo ack_info;
size_t len;
u32 serial, mtu, call_id;
int ioc;
u32 serial, mtu, call_id, padding;
_enter("%d", conn->debug_id);
@ -66,6 +62,13 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
msg.msg_controllen = 0;
msg.msg_flags = 0;
iov[0].iov_base = &pkt;
iov[0].iov_len = sizeof(pkt.whdr);
iov[1].iov_base = &padding;
iov[1].iov_len = 3;
iov[2].iov_base = &ack_info;
iov[2].iov_len = sizeof(ack_info);
pkt.whdr.epoch = htonl(conn->proto.epoch);
pkt.whdr.cid = htonl(conn->proto.cid);
pkt.whdr.callNumber = htonl(call_id);
@ -80,8 +83,10 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
len = sizeof(pkt.whdr);
switch (chan->last_type) {
case RXRPC_PACKET_TYPE_ABORT:
pkt.abort.code = htonl(chan->last_abort);
len += sizeof(pkt.abort);
pkt.abort_code = htonl(chan->last_abort);
iov[0].iov_len += sizeof(pkt.abort_code);
len += sizeof(pkt.abort_code);
ioc = 1;
break;
case RXRPC_PACKET_TYPE_ACK:
@ -94,13 +99,19 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
pkt.ack.nAcks = 0;
pkt.info.rxMTU = htonl(rxrpc_rx_mtu);
pkt.info.maxMTU = htonl(mtu);
pkt.info.rwind = htonl(rxrpc_rx_window_size);
pkt.info.jumbo_max = htonl(rxrpc_rx_jumbo_max);
ack_info.rxMTU = htonl(rxrpc_rx_mtu);
ack_info.maxMTU = htonl(mtu);
ack_info.rwind = htonl(rxrpc_rx_window_size);
ack_info.jumbo_max = htonl(rxrpc_rx_jumbo_max);
pkt.whdr.flags |= RXRPC_SLOW_START_OK;
len += sizeof(pkt.ack) + sizeof(pkt.info);
padding = 0;
iov[0].iov_len += sizeof(pkt.ack);
len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
ioc = 3;
break;
default:
return;
}
/* Resync with __rxrpc_disconnect_call() and check that the last call
@ -110,9 +121,6 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
if (READ_ONCE(chan->last_call) != call_id)
return;
iov.iov_base = &pkt;
iov.iov_len = len;
serial = atomic_inc_return(&conn->serial);
pkt.whdr.serial = htonl(serial);
@ -127,7 +135,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
break;
}
kernel_sendmsg(conn->params.local->socket, &msg, &iov, 1, len);
kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
_leave("");
return;
}


@ -424,7 +424,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
if (earliest != now + MAX_JIFFY_OFFSET) {
_debug("reschedule reaper %ld", (long)earliest - (long)now);
ASSERT(time_after(earliest, now));
rxrpc_set_service_reap_timer(rxnet, earliest);
rxrpc_set_service_reap_timer(rxnet, earliest);
}
while (!list_empty(&graveyard)) {


@ -976,7 +976,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
rxrpc_reduce_call_timer(call, expect_rx_by, now,
rxrpc_timer_set_for_normal);
}
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_DATA:
rxrpc_input_data(call, skb, skew);
@ -1213,7 +1213,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
goto reupgrade;
conn->service_id = sp->hdr.serviceId;
}
if (sp->hdr.callNumber == 0) {
/* Connection-level packet */
_debug("CONN %p {%d}", conn, conn->debug_id);


@ -233,7 +233,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
if (resend_at < 1)
resend_at = 1;
resend_at = now + rxrpc_resend_timeout;
resend_at += now;
WRITE_ONCE(call->resend_at, resend_at);
rxrpc_reduce_call_timer(call, resend_at, now,
rxrpc_timer_set_for_send);


@ -96,21 +96,14 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
return ret;
}
static void tcf_sample_cleanup_rcu(struct rcu_head *rcu)
{
struct tcf_sample *s = container_of(rcu, struct tcf_sample, rcu);
struct psample_group *psample_group;
psample_group = rcu_dereference_protected(s->psample_group, 1);
RCU_INIT_POINTER(s->psample_group, NULL);
psample_group_put(psample_group);
}
static void tcf_sample_cleanup(struct tc_action *a, int bind)
{
struct tcf_sample *s = to_sample(a);
struct psample_group *psample_group;
call_rcu(&s->rcu, tcf_sample_cleanup_rcu);
psample_group = rtnl_dereference(s->psample_group);
RCU_INIT_POINTER(s->psample_group, NULL);
psample_group_put(psample_group);
}
static bool tcf_sample_dev_ok_push(struct net_device *dev)
@ -264,7 +257,6 @@ static int __init sample_init_module(void)
static void __exit sample_cleanup_module(void)
{
rcu_barrier();
tcf_unregister_action(&act_sample_ops, &sample_net_ops);
}


@ -53,6 +53,7 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg)
msg->send_failed = 0;
msg->send_error = 0;
msg->can_delay = 1;
msg->abandoned = 0;
msg->expires_at = 0;
INIT_LIST_HEAD(&msg->chunks);
}
@ -304,6 +305,13 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
if (!chunk->asoc->peer.prsctp_capable)
return 0;
if (chunk->msg->abandoned)
return 1;
if (!chunk->has_tsn &&
!(chunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG))
return 0;
if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&
time_after(jiffies, chunk->msg->expires_at)) {
struct sctp_stream_out *streamout =
@ -316,6 +324,7 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
streamout->ext->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
}
chunk->msg->abandoned = 1;
return 1;
} else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&
chunk->sent_count > chunk->sinfo.sinfo_timetolive) {
@ -324,10 +333,12 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
streamout->ext->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
chunk->msg->abandoned = 1;
return 1;
} else if (!SCTP_PR_POLICY(chunk->sinfo.sinfo_flags) &&
chunk->msg->expires_at &&
time_after(jiffies, chunk->msg->expires_at)) {
chunk->msg->abandoned = 1;
return 1;
}
/* PRIO policy is processed by sendmsg, not here */


@ -364,10 +364,12 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
struct sctp_stream_out *streamout;
if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
if (!chk->msg->abandoned &&
(!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
continue;
chk->msg->abandoned = 1;
list_del_init(&chk->transmitted_list);
sctp_insert_list(&asoc->outqueue.abandoned,
&chk->transmitted_list);
@ -377,7 +379,8 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
if (!chk->tsn_gap_acked) {
if (queue != &asoc->outqueue.retransmit &&
!chk->tsn_gap_acked) {
if (chk->transport)
chk->transport->flight_size -=
sctp_data_size(chk);
@ -403,10 +406,13 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
q->sched->unsched_all(&asoc->stream);
list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
if (!chk->msg->abandoned &&
(!(chk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG) ||
!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
continue;
chk->msg->abandoned = 1;
sctp_sched_dequeue_common(q, chk);
asoc->sent_cnt_removable--;
asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
@ -1434,7 +1440,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
/* If this chunk has not been acked, stop
* considering it as 'outstanding'.
*/
if (!tchunk->tsn_gap_acked) {
if (transmitted_queue != &q->retransmit &&
!tchunk->tsn_gap_acked) {
if (tchunk->transport)
tchunk->transport->flight_size -=
sctp_data_size(tchunk);


@ -371,10 +371,6 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
goto rcu_out;
}
tipc_rcv(sock_net(sk), skb, b);
rcu_read_unlock();
return 0;
rcu_out:
rcu_read_unlock();
out:


@ -193,8 +193,18 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
return -1;
}
event_fd[prog_cnt - 1] = efd;
ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);
err = ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
if (err < 0) {
printf("ioctl PERF_EVENT_IOC_ENABLE failed err %s\n",
strerror(errno));
return -1;
}
err = ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);
if (err < 0) {
printf("ioctl PERF_EVENT_IOC_SET_BPF failed err %s\n",
strerror(errno));
return -1;
}
return 0;
}


@ -6,7 +6,7 @@ RM ?= rm -f
# Make the path relative to DESTDIR, not prefix
ifndef DESTDIR
prefix?=$(HOME)
prefix ?= /usr/local
endif
mandir ?= $(prefix)/share/man
man8dir = $(mandir)/man8


@ -45,8 +45,8 @@ $(LIBBPF)-clean:
$(call QUIET_CLEAN, libbpf)
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) clean >/dev/null
prefix = /usr
bash_compdir ?= $(prefix)/share/bash-completion/completions
prefix = /usr/local
bash_compdir ?= /usr/share/bash-completion/completions
CC = gcc
@ -76,6 +76,7 @@ clean: $(LIBBPF)-clean
$(Q)rm -rf $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
install:
install -m 0755 -d $(prefix)/sbin
install $(OUTPUT)bpftool $(prefix)/sbin/bpftool
install -m 0755 -d $(bash_compdir)
install -m 0644 bash-completion/bpftool $(bash_compdir)
@ -88,5 +89,5 @@ doc-install:
FORCE:
.PHONY: all clean FORCE
.PHONY: all clean FORCE install doc doc-install
.DEFAULT_GOAL := all


@ -58,11 +58,19 @@ bool show_pinned;
struct pinned_obj_table prog_table;
struct pinned_obj_table map_table;
static void __noreturn clean_and_exit(int i)
{
if (json_output)
jsonw_destroy(&json_wtr);
exit(i);
}
void usage(void)
{
last_do_help(last_argc - 1, last_argv + 1);
exit(-1);
clean_and_exit(-1);
}
static int do_help(int argc, char **argv)
@ -280,6 +288,7 @@ int main(int argc, char **argv)
hash_init(prog_table.table);
hash_init(map_table.table);
opterr = 0;
while ((opt = getopt_long(argc, argv, "Vhpjf",
options, NULL)) >= 0) {
switch (opt) {
@ -291,13 +300,25 @@ int main(int argc, char **argv)
pretty_output = true;
/* fall through */
case 'j':
json_output = true;
if (!json_output) {
json_wtr = jsonw_new(stdout);
if (!json_wtr) {
p_err("failed to create JSON writer");
return -1;
}
json_output = true;
}
jsonw_pretty(json_wtr, pretty_output);
break;
case 'f':
show_pinned = true;
break;
default:
usage();
p_err("unrecognized option '%s'", argv[optind - 1]);
if (json_output)
clean_and_exit(-1);
else
usage();
}
}
@ -306,15 +327,6 @@ int main(int argc, char **argv)
if (argc < 0)
usage();
if (json_output) {
json_wtr = jsonw_new(stdout);
if (!json_wtr) {
p_err("failed to create JSON writer");
return -1;
}
jsonw_pretty(json_wtr, pretty_output);
}
bfd_init();
ret = cmd_select(cmds, argc, argv, do_help);


@ -41,6 +41,7 @@
#include <stdbool.h>
#include <stdio.h>
#include <linux/bpf.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/hashtable.h>
@ -50,7 +51,7 @@
#define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); })
#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
#define BAD_ARG() ({ p_err("what is '%s'?\n", *argv); -1; })
#define BAD_ARG() ({ p_err("what is '%s'?", *argv); -1; })
#define ERR_MAX_LEN 1024
@ -80,7 +81,7 @@ void p_info(const char *fmt, ...);
bool is_prefix(const char *pfx, const char *str);
void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep);
void usage(void) __attribute__((noreturn));
void usage(void) __noreturn;
struct pinned_obj_table {
DECLARE_HASHTABLE(table, 16);


@ -3,6 +3,8 @@
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/bpf.h>
#include <linux/filter.h>
@ -131,11 +133,16 @@ static void test_log_bad(char *log, size_t log_len, int log_level)
int main(int argc, char **argv)
{
struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
char full_log[LOG_SIZE];
char log[LOG_SIZE];
size_t want_len;
int i;
/* allow unlimited locked memory to have more consistent error code */
if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
perror("Unable to lift memlock rlimit");
memset(log, 1, LOG_SIZE);
/* Test incorrect attr */