Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking changes from David Miller:
 "Most importantly this should cure the ipv4-mapped ipv6 socket TCP
  crashes some people were seeing, otherwise:

   1) Fix e1000e autonegotiation handling regression, from Tushar Dave.

   2) Fix TX data corruption race on e1000e down, also from Tushar Dave.

   3) Fix bfin_sir IRDA driver build, from Sonic Zhang.

   4) AF_PACKET mmap() tests a flag in the TX ring shared between
      userspace and the kernel for an internal consistency check.  It
      really shouldn't do this to validate the kernel's own behavior
      because the user can corrupt it to be any value at all.  From
      Daniel Borkmann.

   5) Fix TCP metrics leak on netns dismantle, from Eric Dumazet.

   6) Orphan the anonymous TCP socket from the SKB in
      ip_send_unicast_reply() so that the rest of the stack needn't see
      it.  Otherwise we get selinux problems of all sorts, from Eric
      Dumazet.

      This is the best way to fix this since the socket is just a
      placeholder for sending packets in a context where we have no real
      socket at all.

   7) Fix TUN detach crashes, from Stanislav Kinsbursky.

   8) dev_set_alias() leaks memory on krealloc() failure, from Alexey
      Khoroshilov.
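      (A sketch of the leak-safe pattern follows this list.)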

   9) FIB trie must use call_rcu() not call_rcu_bh(), because this code
      is not universally invoked from software interrupts.  From Eric
      Dumazet.

  10) PPTP looks up ipv4 routes with the wrong network namespace, fix
      from Gao Feng."

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (33 commits)
  bnx2x: Fix compiler warnings
  af_packet: remove BUG statement in tpacket_destruct_skb
  macvtap: rcu_dereference outside read-lock section
  codel: refine one condition to avoid a nul rec_inv_sqrt
  ixgbe: add missing braces
  ipv4: fix ip_send_skb()
  net: tcp: ipv6_mapped needs sk_rx_dst_set method
  ipv4: tcp: unicast_sock should not land outside of TCP stack
  bnx2x: Fix recovery flow cleanup during probe
  bnx2x: fix unload previous driver flow when flr-capable
  tun: don't zeroize sock->file on detach
  igb: Fix register defines for all non-82575 hardware
  e1000e: fix panic while dumping packets on Tx hang with IOMMU
  igb: fix panic while dumping packets on Tx hang with IOMMU
  tcp: must free metrics at net dismantle
  net/stmmac: mark probe function as __devinit
  lpc_eth: remove obsolete ifdefs
  net/core: Fix potential memory leak in dev_set_alias()
  cdc-phonet: Don't leak in usbpn_open
  batman-adv: Fix mem leak in the batadv_tt_local_event() function
  ...
Linus Torvalds 2012-08-13 09:18:19 +03:00
commit 1c212c65b2
36 changed files with 233 additions and 156 deletions

@ -1278,7 +1278,7 @@ struct bnx2x {
#define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT)
#define BNX2X_FW_RX_ALIGN_END \
max(1UL << BNX2X_RX_ALIGN_SHIFT, \
max_t(u64, 1UL << BNX2X_RX_ALIGN_SHIFT, \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)

@ -4041,20 +4041,6 @@ static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
return val != 0;
}
/*
* Reset the load status for the current engine.
*/
static void bnx2x_clear_load_status(struct bnx2x *bp)
{
u32 val;
u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
BNX2X_PATH0_LOAD_CNT_MASK);
bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}
static void _print_next_block(int idx, const char *blk)
{
pr_cont("%s%s", idx ? ", " : "", blk);
@ -9384,32 +9370,24 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
return rc;
}
static bool __devinit bnx2x_can_flr(struct bnx2x *bp)
{
int pos;
u32 cap;
struct pci_dev *dev = bp->pdev;
pos = pci_pcie_cap(dev);
if (!pos)
return false;
pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
if (!(cap & PCI_EXP_DEVCAP_FLR))
return false;
return true;
}
static int __devinit bnx2x_do_flr(struct bnx2x *bp)
{
int i, pos;
u16 status;
struct pci_dev *dev = bp->pdev;
/* probe the capability first */
if (bnx2x_can_flr(bp))
return -ENOTTY;
if (CHIP_IS_E1x(bp)) {
BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
return -EINVAL;
}
/* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
bp->common.bc_ver);
return -EINVAL;
}
pos = pci_pcie_cap(dev);
if (!pos)
@ -9429,12 +9407,8 @@ static int __devinit bnx2x_do_flr(struct bnx2x *bp)
"transaction is not cleared; proceeding with reset anyway\n");
clear:
if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
bp->common.bc_ver);
return -EINVAL;
}
BNX2X_DEV_INFO("Initiating FLR\n");
bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
return 0;
@ -9454,8 +9428,21 @@ static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp)
* the one required, then FLR will be sufficient to clean any residue
* left by previous driver
*/
if (bnx2x_test_firmware_version(bp, false) && bnx2x_can_flr(bp))
return bnx2x_do_flr(bp);
rc = bnx2x_test_firmware_version(bp, false);
if (!rc) {
/* fw version is good */
BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
rc = bnx2x_do_flr(bp);
}
if (!rc) {
/* FLR was performed */
BNX2X_DEV_INFO("FLR successful\n");
return 0;
}
BNX2X_DEV_INFO("Could not FLR\n");
/* Close the MCP request, return failure*/
rc = bnx2x_prev_mcp_done(bp);
@ -11427,9 +11414,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
if (!chip_is_e1x)
REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
/* Reset the load counter */
bnx2x_clear_load_status(bp);
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops;

@ -999,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
**/
static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
{
u32 ctrl, ctrl_ext, eecd;
u32 ctrl, ctrl_ext, eecd, tctl;
s32 ret_val;
/*
@ -1014,7 +1014,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
ew32(RCTL, 0);
ew32(TCTL, E1000_TCTL_PSP);
tctl = er32(TCTL);
tctl &= ~E1000_TCTL_EN;
ew32(TCTL, tctl);
e1e_flush();
usleep_range(10000, 20000);
@ -1601,10 +1603,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
* auto-negotiation in the TXCW register and disable
* forced link in the Device Control register in an
* attempt to auto-negotiate with our link partner.
* If the partner code word is null, stop forcing
* and restart auto negotiation.
*/
if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
if (rxcw & E1000_RXCW_C) {
/* Enable autoneg, and unforce link up */
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));

@ -178,6 +178,24 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}
static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
struct e1000_buffer *bi)
{
int i;
struct e1000_ps_page *ps_page;
for (i = 0; i < adapter->rx_ps_pages; i++) {
ps_page = &bi->ps_pages[i];
if (ps_page->page) {
pr_info("packet dump for ps_page %d:\n", i);
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
16, 1, page_address(ps_page->page),
PAGE_SIZE, true);
}
}
}
/*
* e1000e_dump - Print registers, Tx-ring and Rx-ring
*/
@ -299,10 +317,10 @@ static void e1000e_dump(struct e1000_adapter *adapter)
(unsigned long long)buffer_info->time_stamp,
buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
if (netif_msg_pktdata(adapter) && buffer_info->skb)
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
16, 1, phys_to_virt(buffer_info->dma),
buffer_info->length, true);
16, 1, buffer_info->skb->data,
buffer_info->skb->len, true);
}
/* Print Rx Ring Summary */
@ -381,10 +399,8 @@ rx_ring_summary:
buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter))
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS, 16, 1,
phys_to_virt(buffer_info->dma),
adapter->rx_ps_bsize0, true);
e1000e_dump_ps_pages(adapter,
buffer_info);
}
}
break;
@ -444,12 +460,12 @@ rx_ring_summary:
(unsigned long long)buffer_info->dma,
buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter))
if (netif_msg_pktdata(adapter) &&
buffer_info->skb)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS, 16,
1,
phys_to_virt
(buffer_info->dma),
buffer_info->skb->data,
adapter->rx_buffer_len,
true);
}

@ -156,8 +156,12 @@
: (0x0E018 + ((_n) * 0x40)))
#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
: (0x0E028 + ((_n) * 0x40)))
#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8))
#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8))
#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
(0x0C014 + ((_n) * 0x40)))
#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
(0x0E014 + ((_n) * 0x40)))
#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
: (0x0E038 + ((_n) * 0x40)))
#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \

@ -1498,6 +1498,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
break;
}
/* add small delay to avoid loopback test failure */
msleep(50);
/* force 1000, set loopback */
igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);

@ -462,10 +462,10 @@ static void igb_dump(struct igb_adapter *adapter)
(u64)buffer_info->time_stamp,
buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
if (netif_msg_pktdata(adapter) && buffer_info->skb)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1, phys_to_virt(buffer_info->dma),
16, 1, buffer_info->skb->data,
buffer_info->length, true);
}
}
@ -547,18 +547,17 @@ rx_ring_summary:
(u64)buffer_info->dma,
buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter)) {
if (netif_msg_pktdata(adapter) &&
buffer_info->dma && buffer_info->skb) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
phys_to_virt(buffer_info->dma),
IGB_RX_HDR_LEN, true);
DUMP_PREFIX_ADDRESS,
16, 1, buffer_info->skb->data,
IGB_RX_HDR_LEN, true);
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
phys_to_virt(
buffer_info->page_dma +
buffer_info->page_offset),
page_address(buffer_info->page) +
buffer_info->page_offset,
PAGE_SIZE/2, true);
}
}

@ -804,12 +804,13 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
/* Set KX4/KX/KR support according to speed requested */
autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
autoc |= IXGBE_AUTOC_KX4_SUPP;
if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
(hw->phy.smart_speed_active == false))
autoc |= IXGBE_AUTOC_KR_SUPP;
}
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
autoc |= IXGBE_AUTOC_KX_SUPP;
} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&

@ -346,28 +346,15 @@ static phy_interface_t lpc_phy_interface_mode(struct device *dev)
"phy-mode", NULL);
if (mode && !strcmp(mode, "mii"))
return PHY_INTERFACE_MODE_MII;
return PHY_INTERFACE_MODE_RMII;
}
/* non-DT */
#ifdef CONFIG_ARCH_LPC32XX_MII_SUPPORT
return PHY_INTERFACE_MODE_MII;
#else
return PHY_INTERFACE_MODE_RMII;
#endif
}
static bool use_iram_for_net(struct device *dev)
{
if (dev && dev->of_node)
return of_property_read_bool(dev->of_node, "use-iram");
/* non-DT */
#ifdef CONFIG_ARCH_LPC32XX_IRAM_FOR_NET
return true;
#else
return false;
#endif
}
/* Receive Status information word */

@ -74,7 +74,7 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
* the necessary resources and invokes the main to init
* the net device, register the mdio bus etc.
*/
static int stmmac_pltfr_probe(struct platform_device *pdev)
static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
{
int ret = 0;
struct resource *res;

@ -158,7 +158,7 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
/* If not add the 'RPOLC', we can't catch the receive interrupt.
* It's related with the HW layout and the IR transiver.
*/
val |= IREN | RPOLC;
val |= UMOD_IRDA | RPOLC;
UART_PUT_GCTL(port, val);
return ret;
}
@ -432,7 +432,7 @@ static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev
bfin_sir_stop_rx(port);
val = UART_GET_GCTL(port);
val &= ~(UCEN | IREN | RPOLC);
val &= ~(UCEN | UMOD_MASK | RPOLC);
UART_PUT_GCTL(port, val);
#ifdef CONFIG_SIR_BFIN_DMA
@ -518,10 +518,10 @@ static void bfin_sir_send_work(struct work_struct *work)
* reset all the UART.
*/
val = UART_GET_GCTL(port);
val &= ~(IREN | RPOLC);
val &= ~(UMOD_MASK | RPOLC);
UART_PUT_GCTL(port, val);
SSYNC();
val |= IREN | RPOLC;
val |= UMOD_IRDA | RPOLC;
UART_PUT_GCTL(port, val);
SSYNC();
/* bfin_sir_set_speed(port, self->speed); */

@ -94,7 +94,8 @@ static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q)
int i;
for (i = 0; i < MAX_MACVTAP_QUEUES; i++) {
if (rcu_dereference(vlan->taps[i]) == q)
if (rcu_dereference_protected(vlan->taps[i],
lockdep_is_held(&macvtap_lock)) == q)
return i;
}

@ -189,7 +189,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
if (sk_pppox(po)->sk_state & PPPOX_DEAD)
goto tx_error;
rt = ip_route_output_ports(&init_net, &fl4, NULL,
rt = ip_route_output_ports(sock_net(sk), &fl4, NULL,
opt->dst_addr.sin_addr.s_addr,
opt->src_addr.sin_addr.s_addr,
0, 0, IPPROTO_GRE,
@ -468,7 +468,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.private = sk;
po->chan.ops = &pptp_chan_ops;
rt = ip_route_output_ports(&init_net, &fl4, sk,
rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
opt->dst_addr.sin_addr.s_addr,
opt->src_addr.sin_addr.s_addr,
0, 0,

@ -187,7 +187,6 @@ static void __tun_detach(struct tun_struct *tun)
netif_tx_lock_bh(tun->dev);
netif_carrier_off(tun->dev);
tun->tfile = NULL;
tun->socket.file = NULL;
netif_tx_unlock_bh(tun->dev);
/* Drop read queue */

@ -232,6 +232,7 @@ static int usbpn_open(struct net_device *dev)
struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
usb_free_urb(req);
usbpn_close(dev);
return -ENOMEM;
}

@ -707,11 +707,14 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
*/
static bool rs_use_green(struct ieee80211_sta *sta)
{
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->ctx;
return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
!(ctx->ht.non_gf_sta_present);
/*
* There's a bug somewhere in this code that causes the
* scaling to get stuck because GF+SGI can't be combined
* in SISO rates. Until we find that bug, disable GF, it
* has only limited benefit and we still interoperate with
* GF APs since we can always receive GF transmissions.
*/
return false;
}
/**

@ -2243,8 +2243,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
{
struct ieee80211_conf conf = { .flags = 0 };
struct rt2x00lib_conf libconf = { .conf = &conf };
struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf };
rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}

@ -74,20 +74,21 @@ struct can_frame {
/*
* defined bits for canfd_frame.flags
*
* As the default for CAN FD should be to support the high data rate in the
* payload section of the frame (HDR) and to support up to 64 byte in the
* data section (EDL) the bits are only set in the non-default case.
* Btw. as long as there's no real implementation for CAN FD network driver
* these bits are only preliminary.
* The use of struct canfd_frame implies the Extended Data Length (EDL) bit to
* be set in the CAN frame bitstream on the wire. The EDL bit switch turns
* the CAN controllers bitstream processor into the CAN FD mode which creates
* two new options within the CAN FD frame specification:
*
* RX: NOHDR/NOEDL - info about received CAN FD frame
* ESI - bit from originating CAN controller
* TX: NOHDR/NOEDL - control per-frame settings if supported by CAN controller
* ESI - bit is set by local CAN controller
* Bit Rate Switch - to indicate a second bitrate is/was used for the payload
* Error State Indicator - represents the error state of the transmitting node
*
* As the CANFD_ESI bit is internally generated by the transmitting CAN
* controller only the CANFD_BRS bit is relevant for real CAN controllers when
* building a CAN FD frame for transmission. Setting the CANFD_ESI bit can make
* sense for virtual CAN interfaces to test applications with echoed frames.
*/
#define CANFD_NOHDR 0x01 /* frame without high data rate */
#define CANFD_NOEDL 0x02 /* frame without extended data length */
#define CANFD_ESI 0x04 /* error state indicator */
#define CANFD_BRS 0x01 /* bit rate switch (second bitrate for payload data) */
#define CANFD_ESI 0x02 /* error state indicator of the transmitting node */
/**
* struct canfd_frame - CAN flexible data rate frame structure

@ -305,6 +305,8 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
}
}
} else if (drop) {
u32 delta;
if (params->ecn && INET_ECN_set_ce(skb)) {
stats->ecn_mark++;
} else {
@ -320,9 +322,11 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
* assume that the drop rate that controlled the queue on the
* last cycle is a good starting point to control it now.
*/
if (codel_time_before(now - vars->drop_next,
delta = vars->count - vars->lastcount;
if (delta > 1 &&
codel_time_before(now - vars->drop_next,
16 * params->interval)) {
vars->count = (vars->count - vars->lastcount) | 1;
vars->count = delta;
/* we dont care if rec_inv_sqrt approximation
* is not very precise :
* Next Newton steps will correct it quadratically.

@ -110,7 +110,7 @@ struct dst_entry {
};
extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[RTAX_MAX];
extern const u32 dst_default_metrics[];
#define DST_METRICS_READ_ONLY 0x1UL
#define __DST_METRICS_PTR(Y) \

@ -120,7 +120,7 @@ extern struct sk_buff *__ip_make_skb(struct sock *sk,
struct flowi4 *fl4,
struct sk_buff_head *queue,
struct inet_cork *cork);
extern int ip_send_skb(struct sk_buff *skb);
extern int ip_send_skb(struct net *net, struct sk_buff *skb);
extern int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
extern void ip_flush_pending_frames(struct sock *sk);
extern struct sk_buff *ip_make_skb(struct sock *sk,

@ -464,6 +464,7 @@ extern int tcp_disconnect(struct sock *sk, int flags);
void tcp_connect_init(struct sock *sk);
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];

@ -197,6 +197,7 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
del:
list_del(&entry->list);
kfree(entry);
kfree(tt_change_node);
event_removed = true;
goto unlock;
}

@ -1055,6 +1055,8 @@ rollback:
*/
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
char *new_ifalias;
ASSERT_RTNL();
if (len >= IFALIASZ)
@ -1068,9 +1070,10 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
return 0;
}
dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
if (!dev->ifalias)
new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
if (!new_ifalias)
return -ENOMEM;
dev->ifalias = new_ifalias;
strlcpy(dev->ifalias, alias, len+1);
return len;

@ -149,7 +149,15 @@ int dst_discard(struct sk_buff *skb)
}
EXPORT_SYMBOL(dst_discard);
const u32 dst_default_metrics[RTAX_MAX];
const u32 dst_default_metrics[RTAX_MAX + 1] = {
/* This initializer is needed to force linker to place this variable
* into const section. Otherwise it might end into bss section.
* We really want to avoid false sharing on this variable, and catch
* any writes on it.
*/
[RTAX_MAX] = 0xdeadbeef,
};
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
int initial_ref, int initial_obsolete, unsigned short flags)

@ -367,7 +367,7 @@ static void __leaf_free_rcu(struct rcu_head *head)
static inline void free_leaf(struct leaf *l)
{
call_rcu_bh(&l->rcu, __leaf_free_rcu);
call_rcu(&l->rcu, __leaf_free_rcu);
}
static inline void free_leaf_info(struct leaf_info *leaf)

@ -1366,9 +1366,8 @@ out:
return skb;
}
int ip_send_skb(struct sk_buff *skb)
int ip_send_skb(struct net *net, struct sk_buff *skb)
{
struct net *net = sock_net(skb->sk);
int err;
err = ip_local_out(skb);
@ -1391,7 +1390,7 @@ int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
return 0;
/* Netfilter gets whole the not fragmented skb. */
return ip_send_skb(skb);
return ip_send_skb(sock_net(sk), skb);
}
/*
@ -1536,6 +1535,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
arg->csumoffset) = csum_fold(csum_add(nskb->csum,
arg->csum));
nskb->ip_summed = CHECKSUM_NONE;
skb_orphan(nskb);
skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
ip_push_pending_frames(sk, &fl4);
}

@ -1869,7 +1869,7 @@ static struct timewait_sock_ops tcp_timewait_sock_ops = {
.twsk_destructor= tcp_twsk_destructor,
};
static void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
@ -1877,6 +1877,7 @@ static void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
sk->sk_rx_dst = dst;
inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
.queue_xmit = ip_queue_xmit,

@ -731,6 +731,18 @@ static int __net_init tcp_net_metrics_init(struct net *net)
static void __net_exit tcp_net_metrics_exit(struct net *net)
{
unsigned int i;
for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
struct tcp_metrics_block *tm, *next;
tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
while (tm) {
next = rcu_dereference_protected(tm->tcpm_next, 1);
kfree(tm);
tm = next;
}
}
kfree(net->ipv4.tcp_metrics_hash);
}

@ -758,7 +758,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
uh->check = CSUM_MANGLED_0;
send:
err = ip_send_skb(skb);
err = ip_send_skb(sock_net(sk), skb);
if (err) {
if (err == -ENOBUFS && !inet->recverr) {
UDP_INC_STATS_USER(sock_net(sk),

@ -1777,6 +1777,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
.queue_xmit = ip_queue_xmit,
.send_check = tcp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
.sk_rx_dst_set = inet_sk_rx_dst_set,
.conn_request = tcp_v6_conn_request,
.syn_recv_sock = tcp_v6_syn_recv_sock,
.net_header_len = sizeof(struct iphdr),

@ -1079,7 +1079,7 @@ static void *packet_current_rx_frame(struct packet_sock *po,
default:
WARN(1, "TPACKET version not supported\n");
BUG();
return 0;
return NULL;
}
}
@ -1936,7 +1936,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
if (likely(po->tx_ring.pg_vec)) {
ph = skb_shinfo(skb)->destructor_arg;
BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
atomic_dec(&po->tx_ring.pending);
__packet_set_status(po, ph, TP_STATUS_AVAILABLE);

@ -203,6 +203,34 @@ out:
return index;
}
/* Length of the next packet (0 if the queue is empty). */
static unsigned int qdisc_peek_len(struct Qdisc *sch)
{
struct sk_buff *skb;
skb = sch->ops->peek(sch);
return skb ? qdisc_pkt_len(skb) : 0;
}
static void qfq_deactivate_class(struct qfq_sched *, struct qfq_class *);
static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
unsigned int len);
static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl,
u32 lmax, u32 inv_w, int delta_w)
{
int i;
/* update qfq-specific data */
cl->lmax = lmax;
cl->inv_w = inv_w;
i = qfq_calc_index(cl->inv_w, cl->lmax);
cl->grp = &q->groups[i];
q->wsum += delta_w;
}
static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
struct nlattr **tca, unsigned long *arg)
{
@ -250,6 +278,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
lmax = 1UL << QFQ_MTU_SHIFT;
if (cl != NULL) {
bool need_reactivation = false;
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
qdisc_root_sleeping_lock(sch),
@ -258,12 +288,29 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
return err;
}
if (inv_w != cl->inv_w) {
sch_tree_lock(sch);
q->wsum += delta_w;
cl->inv_w = inv_w;
sch_tree_unlock(sch);
if (lmax == cl->lmax && inv_w == cl->inv_w)
return 0; /* nothing to update */
i = qfq_calc_index(inv_w, lmax);
sch_tree_lock(sch);
if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
/*
* shift cl->F back, to not charge the
* class for the not-yet-served head
* packet
*/
cl->F = cl->S;
/* remove class from its slot in the old group */
qfq_deactivate_class(q, cl);
need_reactivation = true;
}
qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
if (need_reactivation) /* activate in new group */
qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
sch_tree_unlock(sch);
return 0;
}
@ -273,11 +320,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cl->refcnt = 1;
cl->common.classid = classid;
cl->lmax = lmax;
cl->inv_w = inv_w;
i = qfq_calc_index(cl->inv_w, cl->lmax);
cl->grp = &q->groups[i];
qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
cl->qdisc = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops, classid);
@ -294,7 +338,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
return err;
}
}
q->wsum += weight;
sch_tree_lock(sch);
qdisc_class_hash_insert(&q->clhash, &cl->common);
@ -711,15 +754,6 @@ static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
}
}
/* What is length of next packet in queue (0 if queue is empty) */
static unsigned int qdisc_peek_len(struct Qdisc *sch)
{
struct sk_buff *skb;
skb = sch->ops->peek(sch);
return skb ? qdisc_pkt_len(skb) : 0;
}
/*
* Updates the class, returns true if also the group needs to be updated.
*/
@ -843,11 +877,8 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_group *grp;
struct qfq_class *cl;
int err;
u64 roundedS;
int s;
cl = qfq_classify(skb, sch, &err);
if (cl == NULL) {
@ -876,11 +907,25 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return err;
/* If reach this point, queue q was idle */
grp = cl->grp;
qfq_activate_class(q, cl, qdisc_pkt_len(skb));
return err;
}
/*
* Handle class switch from idle to backlogged.
*/
static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
unsigned int pkt_len)
{
struct qfq_group *grp = cl->grp;
u64 roundedS;
int s;
qfq_update_start(q, cl);
/* compute new finish time and rounded start. */
cl->F = cl->S + (u64)qdisc_pkt_len(skb) * cl->inv_w;
cl->F = cl->S + (u64)pkt_len * cl->inv_w;
roundedS = qfq_round_down(cl->S, grp->slot_shift);
/*
@ -917,8 +962,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
skip_update:
qfq_slot_insert(grp, cl, roundedS);
return err;
}

@ -952,6 +952,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
*/
synchronize_rcu();
INIT_LIST_HEAD(&wdev->list);
/*
* Ensure that all events have been processed and
* freed.
*/
cfg80211_process_wdev_events(wdev);
break;
case NETDEV_PRE_UP:
if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))

@ -426,6 +426,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
struct net_device *dev, enum nl80211_iftype ntype,
u32 *flags, struct vif_params *params);
void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
void cfg80211_process_wdev_events(struct wireless_dev *wdev);
int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev,

@ -735,7 +735,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
wdev->connect_keys = NULL;
}
static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
void cfg80211_process_wdev_events(struct wireless_dev *wdev)
{
struct cfg80211_event *ev;
unsigned long flags;