Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

 1) Fix memory leak in netfilter flowtable, from Roi Dayan.

 2) Ref-count leaks in netrom and tipc, from Xiyu Yang.

 3) Fix warning when mptcp socket is never accepted before close, from
    Florian Westphal.

 4) Missed locking in ovs_ct_exit(), from Tonghao Zhang.

 5) Fix large delays during PTP synchronization in cxgb4, from Rahul
    Lakkireddy.

 6) team_mode_get() can hang, from Taehee Yoo.

 7) Need to use kvzalloc() when allocating fw tracer in mlx5 driver,
    from Niklas Schnelle.

 8) Fix handling of bpf XADD on BTF memory, from Jann Horn.

 9) Fix BPF_STX/BPF_B encoding in x86 bpf jit, from Luke Nelson.

10) Missing queue memory release in iwlwifi pcie code, from Johannes
    Berg.

11) Fix NULL deref in macvlan device event, from Taehee Yoo.

12) Initialize lan87xx phy correctly, from Yuiko Oshino.

13) Fix looping between VRF and XFRM lookups, from David Ahern.

14) etf packet scheduler assumes all sockets are full sockets, which is
    not necessarily true. From Eric Dumazet.

15) Fix mptcp data_fin handling in RX path, from Paolo Abeni.

16) fib_select_default() needs to handle nexthop objects, from David
    Ahern.

17) Use GFP_ATOMIC under spinlock in mac80211_hwsim, from Wei Yongjun.

18) vxlan and geneve use wrong nlattr array, from Sabrina Dubroca.

19) Correct rx/tx stats in bcmgenet driver, from Doug Berger.

20) BPF_LDX zero-extension is encoded improperly in x86_32 bpf jit, fix
    from Luke Nelson.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (100 commits)
  selftests/bpf: Fix a couple of broken test_btf cases
  tools/runqslower: Ensure own vmlinux.h is picked up first
  bpf: Make bpf_link_fops static
  bpftool: Respect the -d option in struct_ops cmd
  selftests/bpf: Add test for freplace program with expected_attach_type
  bpf: Propagate expected_attach_type when verifying freplace programs
  bpf: Fix leak in LINK_UPDATE and enforce empty old_prog_fd
  bpf, x86_32: Fix logic error in BPF_LDX zero-extension
  bpf, x86_32: Fix clobbering of dst for BPF_JSET
  bpf, x86_32: Fix incorrect encoding in BPF_LDX zero-extension
  bpf: Fix reStructuredText markup
  net: systemport: suppress warnings on failed Rx SKB allocations
  net: bcmgenet: suppress warnings on failed Rx SKB allocations
  macsec: avoid to set wrong mtu
  mac80211: sta_info: Add lockdep condition for RCU list usage
  mac80211: populate debugfs only after cfg80211 init
  net: bcmgenet: correct per TX/RX ring statistics
  net: meth: remove spurious copyright text
  net: phy: bcm84881: clear settings on link down
  chcr: Fix CPU hard lockup
  ...
Linus Torvalds 2020-04-24 19:17:30 -07:00
commit ab51cac00e
114 changed files with 1132 additions and 398 deletions


@ -983,6 +983,13 @@ ip_early_demux - BOOLEAN
reduces overall throughput, in such case you should disable it.
Default: 1
ping_group_range - 2 INTEGERS
Restrict ICMP_PROTO datagram sockets to users in the group range.
The default is "1 0", meaning, that nobody (not even root) may
create ping sockets. Setting it to "100 100" would grant permissions
to the single group. "0 4294967295" would enable it for the world, "100
4294967295" would enable it for the users, but not daemons.
tcp_early_demux - BOOLEAN
Enable early demux for established TCP sockets.
Default: 1
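
A brief aside, not part of the diff: the ping_group_range knob above is what gates unprivileged "ping sockets". A minimal user-space sketch, assuming the caller's GID falls inside the configured range:

/* Build: gcc -o ping_sock ping_sock.c
 * socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP) is the ping-socket type
 * gated by net.ipv4.ping_group_range; it fails with EACCES when the
 * caller's group is outside the range.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);

	if (fd < 0) {
		perror("socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP)");
		return 1;
	}
	printf("ping socket created, fd=%d\n", fd);
	return 0;
}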


@ -189,7 +189,7 @@ F: drivers/net/hamradio/6pack.c
M: Johannes Berg <johannes@sipsolutions.net>
L: linux-wireless@vger.kernel.org
S: Maintained
W: http://wireless.kernel.org/
W: https://wireless.wiki.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
F: Documentation/driver-api/80211/cfg80211.rst
@ -505,7 +505,7 @@ F: drivers/hwmon/adm1029.c
ADM8211 WIRELESS DRIVER
L: linux-wireless@vger.kernel.org
S: Orphan
W: http://wireless.kernel.org/
W: https://wireless.wiki.kernel.org/
F: drivers/net/wireless/admtek/adm8211.*
ADP1653 FLASH CONTROLLER DRIVER
@ -2850,14 +2850,14 @@ M: Nick Kossifidis <mickflemm@gmail.com>
M: Luis Chamberlain <mcgrof@kernel.org>
L: linux-wireless@vger.kernel.org
S: Maintained
W: http://wireless.kernel.org/en/users/Drivers/ath5k
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath5k
F: drivers/net/wireless/ath/ath5k/
ATHEROS ATH6KL WIRELESS DRIVER
M: Kalle Valo <kvalo@codeaurora.org>
L: linux-wireless@vger.kernel.org
S: Supported
W: http://wireless.kernel.org/en/users/Drivers/ath6kl
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath6kl
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath6kl/
@ -3020,7 +3020,7 @@ B43 WIRELESS DRIVER
L: linux-wireless@vger.kernel.org
L: b43-dev@lists.infradead.org
S: Odd Fixes
W: http://wireless.kernel.org/en/users/Drivers/b43
W: https://wireless.wiki.kernel.org/en/users/Drivers/b43
F: drivers/net/wireless/broadcom/b43/
B43LEGACY WIRELESS DRIVER
@ -3028,7 +3028,7 @@ M: Larry Finger <Larry.Finger@lwfinger.net>
L: linux-wireless@vger.kernel.org
L: b43-dev@lists.infradead.org
S: Maintained
W: http://wireless.kernel.org/en/users/Drivers/b43
W: https://wireless.wiki.kernel.org/en/users/Drivers/b43
F: drivers/net/wireless/broadcom/b43legacy/
BACKLIGHT CLASS/SUBSYSTEM
@ -3843,7 +3843,7 @@ CARL9170 LINUX COMMUNITY WIRELESS DRIVER
M: Christian Lamparter <chunkeey@googlemail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
W: http://wireless.kernel.org/en/users/Drivers/carl9170
W: https://wireless.wiki.kernel.org/en/users/Drivers/carl9170
F: drivers/net/wireless/ath/carl9170/
CAVIUM I2C DRIVER
@ -5176,6 +5176,7 @@ S: Maintained
F: drivers/soc/fsl/dpio
DPAA2 ETHERNET DRIVER
M: Ioana Ciornei <ioana.ciornei@nxp.com>
M: Ioana Radulescu <ruxandra.radulescu@nxp.com>
L: netdev@vger.kernel.org
S: Maintained
@ -10075,7 +10076,7 @@ MAC80211
M: Johannes Berg <johannes@sipsolutions.net>
L: linux-wireless@vger.kernel.org
S: Maintained
W: http://wireless.kernel.org/
W: https://wireless.wiki.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
F: Documentation/networking/mac80211-injection.txt
@ -10705,7 +10706,6 @@ MEDIATEK MT76 WIRELESS LAN DRIVER
M: Felix Fietkau <nbd@nbd.name>
M: Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
R: Ryder Lee <ryder.lee@mediatek.com>
R: Roy Luo <royluo@google.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/mediatek/mt76/
@ -12656,7 +12656,7 @@ F: fs/orangefs/
ORINOCO DRIVER
L: linux-wireless@vger.kernel.org
S: Orphan
W: http://wireless.kernel.org/en/users/Drivers/orinoco
W: https://wireless.wiki.kernel.org/en/users/Drivers/orinoco
W: http://www.nongnu.org/orinoco/
F: drivers/net/wireless/intersil/orinoco/
@ -12682,7 +12682,7 @@ P54 WIRELESS DRIVER
M: Christian Lamparter <chunkeey@googlemail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
W: http://wireless.kernel.org/en/users/Drivers/p54
W: https://wireless.wiki.kernel.org/en/users/Drivers/p54
F: drivers/net/wireless/intersil/p54/
PACKING
@ -13603,7 +13603,7 @@ PRISM54 WIRELESS DRIVER
M: Luis Chamberlain <mcgrof@kernel.org>
L: linux-wireless@vger.kernel.org
S: Obsolete
W: http://wireless.kernel.org/en/users/Drivers/p54
W: https://wireless.wiki.kernel.org/en/users/Drivers/p54
F: drivers/net/wireless/intersil/prism54/
PROC FILESYSTEM
@ -13944,7 +13944,7 @@ QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
M: Kalle Valo <kvalo@codeaurora.org>
L: ath10k@lists.infradead.org
S: Supported
W: http://wireless.kernel.org/en/users/Drivers/ath10k
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath10k/
@ -13959,7 +13959,7 @@ QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
M: QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
L: linux-wireless@vger.kernel.org
S: Supported
W: http://wireless.kernel.org/en/users/Drivers/ath9k
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath9k
F: drivers/net/wireless/ath/ath9k/
QUALCOMM CAMERA SUBSYSTEM DRIVER
@ -14056,13 +14056,12 @@ QUALCOMM WCN36XX WIRELESS DRIVER
M: Kalle Valo <kvalo@codeaurora.org>
L: wcn36xx@lists.infradead.org
S: Supported
W: http://wireless.kernel.org/en/users/Drivers/wcn36xx
W: https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
T: git git://github.com/KrasnikovEugene/wcn36xx.git
F: drivers/net/wireless/ath/wcn36xx/
QUANTENNA QTNFMAC WIRELESS DRIVER
M: Igor Mitsyanko <imitsyanko@quantenna.com>
M: Avinash Patil <avinashp@quantenna.com>
M: Sergey Matyukevich <smatyukevich@quantenna.com>
L: linux-wireless@vger.kernel.org
S: Maintained
@ -14284,7 +14283,7 @@ REALTEK WIRELESS DRIVER (rtlwifi family)
M: Ping-Ke Shih <pkshih@realtek.com>
L: linux-wireless@vger.kernel.org
S: Maintained
W: http://wireless.kernel.org/
W: https://wireless.wiki.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
F: drivers/net/wireless/realtek/rtlwifi/
@ -14419,7 +14418,7 @@ RFKILL
M: Johannes Berg <johannes@sipsolutions.net>
L: linux-wireless@vger.kernel.org
S: Maintained
W: http://wireless.kernel.org/
W: https://wireless.wiki.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
F: Documentation/ABI/stable/sysfs-class-rfkill
@ -14568,7 +14567,7 @@ F: drivers/media/dvb-frontends/rtl2832_sdr*
RTL8180 WIRELESS DRIVER
L: linux-wireless@vger.kernel.org
S: Orphan
W: http://wireless.kernel.org/
W: https://wireless.wiki.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
F: drivers/net/wireless/realtek/rtl818x/rtl8180/
@ -14578,7 +14577,7 @@ M: Hin-Tak Leung <htl10@users.sourceforge.net>
M: Larry Finger <Larry.Finger@lwfinger.net>
L: linux-wireless@vger.kernel.org
S: Maintained
W: http://wireless.kernel.org/
W: https://wireless.wiki.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
F: drivers/net/wireless/realtek/rtl818x/rtl8187/
@ -16933,8 +16932,8 @@ F: drivers/media/platform/ti-vpe/
TI WILINK WIRELESS DRIVERS
L: linux-wireless@vger.kernel.org
S: Orphan
W: http://wireless.kernel.org/en/users/Drivers/wl12xx
W: http://wireless.kernel.org/en/users/Drivers/wl1251
W: https://wireless.wiki.kernel.org/en/users/Drivers/wl12xx
W: https://wireless.wiki.kernel.org/en/users/Drivers/wl1251
T: git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git
F: drivers/net/wireless/ti/
F: include/linux/wl12xx.h
@ -18216,7 +18215,7 @@ M: Maya Erez <merez@codeaurora.org>
L: linux-wireless@vger.kernel.org
L: wil6210@qti.qualcomm.com
S: Supported
W: http://wireless.kernel.org/en/users/Drivers/wil6210
W: https://wireless.wiki.kernel.org/en/users/Drivers/wil6210
F: drivers/net/wireless/ath/wil6210/
WIMAX STACK


@ -158,6 +158,19 @@ static bool is_ereg(u32 reg)
BIT(BPF_REG_AX));
}
/*
* is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
* lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
* of encoding. al,cl,dl,bl have simpler encoding.
*/
static bool is_ereg_8l(u32 reg)
{
return is_ereg(reg) ||
(1 << reg) & (BIT(BPF_REG_1) |
BIT(BPF_REG_2) |
BIT(BPF_REG_FP));
}
static bool is_axreg(u32 reg)
{
return reg == BPF_REG_0;
@ -598,9 +611,8 @@ static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
switch (size) {
case BPF_B:
/* Emit 'mov byte ptr [rax + off], al' */
if (is_ereg(dst_reg) || is_ereg(src_reg) ||
/* We have to add extra byte for x86 SIL, DIL regs */
src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
else
EMIT1(0x88);


@ -1847,14 +1847,16 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_B:
case BPF_H:
case BPF_W:
if (!bpf_prog->aux->verifier_zext)
if (bpf_prog->aux->verifier_zext)
break;
if (dstk) {
EMIT3(0xC7, add_1reg(0x40, IA32_EBP),
STACK_VAR(dst_hi));
EMIT(0x0, 4);
} else {
EMIT3(0xC7, add_1reg(0xC0, dst_hi), 0);
/* xor dst_hi,dst_hi */
EMIT2(0x33,
add_2reg(0xC0, dst_hi, dst_hi));
}
break;
case BPF_DW:
@ -2013,8 +2015,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP32 | BPF_JSET | BPF_X: {
bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
u8 dreg_lo = IA32_EAX;
u8 dreg_hi = IA32_EDX;
u8 sreg_lo = sstk ? IA32_ECX : src_lo;
u8 sreg_hi = sstk ? IA32_EBX : src_hi;
@ -2026,6 +2028,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
add_2reg(0x40, IA32_EBP,
IA32_EDX),
STACK_VAR(dst_hi));
} else {
/* mov dreg_lo,dst_lo */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo));
if (is_jmp64)
/* mov dreg_hi,dst_hi */
EMIT2(0x89,
add_2reg(0xC0, dreg_hi, dst_hi));
}
if (sstk) {
@ -2050,8 +2059,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP32 | BPF_JSET | BPF_K: {
bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
u8 dreg_lo = IA32_EAX;
u8 dreg_hi = IA32_EDX;
u8 sreg_lo = IA32_ECX;
u8 sreg_hi = IA32_EBX;
u32 hi;
@ -2064,6 +2073,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
add_2reg(0x40, IA32_EBP,
IA32_EDX),
STACK_VAR(dst_hi));
} else {
/* mov dreg_lo,dst_lo */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo));
if (is_jmp64)
/* mov dreg_hi,dst_hi */
EMIT2(0x89,
add_2reg(0xC0, dreg_hi, dst_hi));
}
/* mov ecx,imm32 */


@ -120,12 +120,10 @@ out:
static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info,
int new_state)
{
unsigned long flags;
/* This function can be called from both rx (interrupt context) and tx
* queue contexts.
*/
spin_lock_irqsave(&tx_info->lock, flags);
spin_lock_bh(&tx_info->lock);
switch (tx_info->connection_state) {
case KTLS_CONN_CLOSED:
tx_info->connection_state = new_state;
@ -169,7 +167,7 @@ static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info,
pr_err("unknown KTLS connection state\n");
break;
}
spin_unlock_irqrestore(&tx_info->lock, flags);
spin_unlock_bh(&tx_info->lock);
return tx_info->connection_state;
}
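
For context, a hedged sketch (not the driver code) of the locking rule the chcr change relies on: when a lock is only taken from process context and softirq (BH) context, never from hard-IRQ context, disabling bottom halves is enough and saving IRQ flags is unnecessary.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(state_lock);
static int state;

/* process context, e.g. the TX queue path */
static void update_from_tx(int new_state)
{
	spin_lock_bh(&state_lock);	/* block local softirqs */
	state = new_state;
	spin_unlock_bh(&state_lock);
}

/* softirq context, e.g. the RX completion path */
static void update_from_rx(int new_state)
{
	spin_lock(&state_lock);		/* BHs are already disabled here */
	state = new_state;
	spin_unlock(&state_lock);
}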


@ -1474,6 +1474,10 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
reg |= ARLTBL_RW;
else
reg &= ~ARLTBL_RW;
if (dev->vlan_enabled)
reg &= ~ARLTBL_IVL_SVL_SELECT;
else
reg |= ARLTBL_IVL_SVL_SELECT;
b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
return b53_arl_op_wait(dev);
@ -1483,6 +1487,7 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
u16 vid, struct b53_arl_entry *ent, u8 *idx,
bool is_valid)
{
DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
unsigned int i;
int ret;
@ -1490,6 +1495,8 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
if (ret)
return ret;
bitmap_zero(free_bins, dev->num_arl_entries);
/* Read the bins */
for (i = 0; i < dev->num_arl_entries; i++) {
u64 mac_vid;
@ -1501,13 +1508,24 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
b53_arl_to_entry(ent, mac_vid, fwd_entry);
if (!(fwd_entry & ARLTBL_VALID))
if (!(fwd_entry & ARLTBL_VALID)) {
set_bit(i, free_bins);
continue;
}
if ((mac_vid & ARLTBL_MAC_MASK) != mac)
continue;
if (dev->vlan_enabled &&
((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid)
continue;
*idx = i;
return 0;
}
if (bitmap_weight(free_bins, dev->num_arl_entries) == 0)
return -ENOSPC;
*idx = find_first_bit(free_bins, dev->num_arl_entries);
return -ENOENT;
}
@ -1537,10 +1555,21 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
if (op)
return ret;
/* We could not find a matching MAC, so reset to a new entry */
if (ret) {
switch (ret) {
case -ENOSPC:
dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
addr, vid);
return is_valid ? ret : 0;
case -ENOENT:
/* We could not find a matching MAC, so reset to a new entry */
dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n",
addr, vid, idx);
fwd_entry = 0;
idx = 1;
break;
default:
dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n",
addr, vid, idx);
break;
}
/* For multicast address, the port is a bitmask and the validity
@ -1558,7 +1587,6 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
ent.is_valid = !!(ent.port);
}
ent.is_valid = is_valid;
ent.vid = vid;
ent.is_static = true;
ent.is_age = false;


@ -292,6 +292,7 @@
/* ARL Table Read/Write Register (8 bit) */
#define B53_ARLTBL_RW_CTRL 0x00
#define ARLTBL_RW BIT(0)
#define ARLTBL_IVL_SVL_SELECT BIT(6)
#define ARLTBL_START_DONE BIT(7)
/* MAC Address Index Register (48 bit) */
@ -304,7 +305,7 @@
*
* BCM5325 and BCM5365 share most definitions below
*/
#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n))
#define B53_ARLTBL_MAC_VID_ENTRY(n) ((0x10 * (n)) + 0x10)
#define ARLTBL_MAC_MASK 0xffffffffffffULL
#define ARLTBL_VID_S 48
#define ARLTBL_VID_MASK_25 0xff
@ -316,13 +317,16 @@
#define ARLTBL_VALID_25 BIT(63)
/* ARL Table Data Entry N Registers (32 bit) */
#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x08)
#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x18)
#define ARLTBL_DATA_PORT_ID_MASK 0x1ff
#define ARLTBL_TC(tc) ((3 & tc) << 11)
#define ARLTBL_AGE BIT(14)
#define ARLTBL_STATIC BIT(15)
#define ARLTBL_VALID BIT(16)
/* Maximum number of bin entries in the ARL for all switches */
#define B53_ARLTBL_MAX_BIN_ENTRIES 4
/* ARL Search Control Register (8 bit) */
#define B53_ARL_SRCH_CTL 0x50
#define B53_ARL_SRCH_CTL_25 0x20


@ -664,7 +664,8 @@ static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
dma_addr_t mapping;
/* Allocate a new SKB for a new packet */
skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
GFP_ATOMIC | __GFP_NOWARN);
if (!skb) {
priv->mib.alloc_rx_buff_failed++;
netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
@ -2475,7 +2476,6 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->wol_irq = platform_get_irq(pdev, 1);
}
if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
dev_err(&pdev->dev, "invalid interrupts\n");
ret = -EINVAL;
goto err_free_netdev;
}


@ -172,7 +172,6 @@ static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct bgmac *bgmac;
struct resource *regs;
const u8 *mac_addr;
bgmac = bgmac_alloc(&pdev->dev);
@ -202,31 +201,21 @@ static int bgmac_probe(struct platform_device *pdev)
if (bgmac->irq < 0)
return bgmac->irq;
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "amac_base");
if (!regs) {
dev_err(&pdev->dev, "Unable to obtain base resource\n");
return -EINVAL;
}
bgmac->plat.base = devm_ioremap_resource(&pdev->dev, regs);
bgmac->plat.base =
devm_platform_ioremap_resource_byname(pdev, "amac_base");
if (IS_ERR(bgmac->plat.base))
return PTR_ERR(bgmac->plat.base);
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
if (regs) {
bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(bgmac->plat.idm_base))
return PTR_ERR(bgmac->plat.idm_base);
bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
}
bgmac->plat.idm_base =
devm_platform_ioremap_resource_byname(pdev, "idm_base");
if (IS_ERR(bgmac->plat.idm_base))
return PTR_ERR(bgmac->plat.idm_base);
bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
if (regs) {
bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
regs);
if (IS_ERR(bgmac->plat.nicpm_base))
return PTR_ERR(bgmac->plat.nicpm_base);
}
bgmac->plat.nicpm_base =
devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
if (IS_ERR(bgmac->plat.nicpm_base))
return PTR_ERR(bgmac->plat.nicpm_base);
bgmac->read = platform_bgmac_read;
bgmac->write = platform_bgmac_write;


@ -934,6 +934,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
if (netif_running(dev))
bcmgenet_update_mib_counters(priv);
dev->netdev_ops->ndo_get_stats(dev);
for (i = 0; i < BCMGENET_STATS_LEN; i++) {
const struct bcmgenet_stats *s;
char *p;
@ -1622,7 +1624,8 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
dma_addr_t mapping;
/* Allocate a new Rx skb */
skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
GFP_ATOMIC | __GFP_NOWARN);
if (!skb) {
priv->mib.alloc_rx_buff_failed++;
netif_err(priv, rx_err, priv->dev,
@ -3156,6 +3159,7 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
dev->stats.rx_packets = rx_packets;
dev->stats.rx_errors = rx_errors;
dev->stats.rx_missed_errors = rx_errors;
dev->stats.rx_dropped = rx_dropped;
return &dev->stats;
}


@ -1049,9 +1049,9 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
}
}
static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
struct cudbg_error *cudbg_err,
u8 mem_type)
static int cudbg_mem_region_size(struct cudbg_init *pdbg_init,
struct cudbg_error *cudbg_err,
u8 mem_type, unsigned long *region_size)
{
struct adapter *padap = pdbg_init->adap;
struct cudbg_meminfo mem_info;
@ -1060,15 +1060,23 @@ static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
rc = cudbg_fill_meminfo(padap, &mem_info);
if (rc)
if (rc) {
cudbg_err->sys_err = rc;
return rc;
}
cudbg_t4_fwcache(pdbg_init, cudbg_err);
rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
if (rc)
if (rc) {
cudbg_err->sys_err = rc;
return rc;
}
return mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
if (region_size)
*region_size = mem_info.avail[mc_idx].limit -
mem_info.avail[mc_idx].base;
return 0;
}
static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
@ -1076,7 +1084,12 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
struct cudbg_error *cudbg_err,
u8 mem_type)
{
unsigned long size = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type);
unsigned long size = 0;
int rc;
rc = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type, &size);
if (rc)
return rc;
return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
cudbg_err);


@ -311,32 +311,17 @@ static int cxgb4_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
*/
static int cxgb4_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct adapter *adapter = (struct adapter *)container_of(ptp,
struct adapter, ptp_clock_info);
struct fw_ptp_cmd c;
struct adapter *adapter = container_of(ptp, struct adapter,
ptp_clock_info);
u64 ns;
int err;
memset(&c, 0, sizeof(c));
c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
FW_CMD_REQUEST_F |
FW_CMD_READ_F |
FW_PTP_CMD_PORTID_V(0));
c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
c.u.ts.sc = FW_PTP_SC_GET_TIME;
err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), &c);
if (err < 0) {
dev_err(adapter->pdev_dev,
"PTP: %s error %d\n", __func__, -err);
return err;
}
ns = t4_read_reg(adapter, T5_PORT_REG(0, MAC_PORT_PTP_SUM_LO_A));
ns |= (u64)t4_read_reg(adapter,
T5_PORT_REG(0, MAC_PORT_PTP_SUM_HI_A)) << 32;
/* convert to timespec*/
ns = be64_to_cpu(c.u.ts.tm);
*ts = ns_to_timespec64(ns);
return err;
return 0;
}
/**


@ -1906,6 +1906,9 @@
#define MAC_PORT_CFG2_A 0x818
#define MAC_PORT_PTP_SUM_LO_A 0x990
#define MAC_PORT_PTP_SUM_HI_A 0x994
#define MPS_CMN_CTL_A 0x9000
#define COUNTPAUSEMCRX_S 5


@ -1476,7 +1476,7 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
if (hw->mac_type == e1000_82545 ||
hw->mac_type == e1000_ce4100 ||
hw->mac_type == e1000_82546) {
return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
return ((begin ^ (end - 1)) >> 16) == 0;
}
return true;


@ -1611,7 +1611,7 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
}
}
if (lut) {
bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
bool pf_lut = vsi->type == I40E_VSI_MAIN;
ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
if (ret) {
@ -11436,7 +11436,7 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
}
if (lut) {
bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
bool pf_lut = vsi->type == I40E_VSI_MAIN;
ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
if (ret) {


@ -43,6 +43,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/moduleparam.h>
#include <linux/indirect_call_wrapper.h>
#include "mlx4_en.h"
@ -261,6 +262,10 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
}
}
INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int index, u64 timestamp,
int napi_mode));
u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
@ -329,6 +334,11 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
return tx_info->nr_txbb;
}
INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int index, u64 timestamp,
int napi_mode));
u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int index, u64 timestamp,
@ -449,7 +459,9 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
timestamp = mlx4_en_get_cqe_ts(cqe);
/* free next descriptor */
last_nr_txbb = ring->free_tx_desc(
last_nr_txbb = INDIRECT_CALL_2(ring->free_tx_desc,
mlx4_en_free_tx_desc,
mlx4_en_recycle_tx_desc,
priv, ring, ring_index,
timestamp, napi_budget);
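
A hedged sketch of the indirect-call-wrapper pattern used above (the names below are made up): INDIRECT_CALL_2() compares the function pointer against the two expected targets so the common case becomes a direct call, which is cheap under retpolines, and only falls back to a true indirect call otherwise.

#include <linux/indirect_call_wrapper.h>

static int handle_fast(int x) { return x + 1; }
static int handle_slow(int x) { return x * 2; }

static int (*handler)(int) = handle_fast;

static int dispatch(int x)
{
	/* Roughly: if (handler == handle_fast) return handle_fast(x);
	 *          if (handler == handle_slow) return handle_slow(x);
	 *          return handler(x);
	 */
	return INDIRECT_CALL_2(handler, handle_fast, handle_slow, x);
}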


@ -7,10 +7,10 @@ config MLX5_CORE
tristate "Mellanox 5th generation network adapters (ConnectX series) core driver"
depends on PCI
select NET_DEVLINK
imply PTP_1588_CLOCK
imply VXLAN
imply MLXFW
imply PCI_HYPERV_INTERFACE
depends on VXLAN || !VXLAN
depends on MLXFW || !MLXFW
depends on PTP_1588_CLOCK || !PTP_1588_CLOCK
depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE
default n
---help---
Core driver for low level functionality of the ConnectX-4 and


@ -935,7 +935,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
return NULL;
}
tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
tracer = kvzalloc(sizeof(*tracer), GFP_KERNEL);
if (!tracer)
return ERR_PTR(-ENOMEM);
@ -982,7 +982,7 @@ destroy_workqueue:
tracer->dev = NULL;
destroy_workqueue(tracer->work_queue);
free_tracer:
kfree(tracer);
kvfree(tracer);
return ERR_PTR(err);
}
@ -1061,7 +1061,7 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
mlx5_fw_tracer_destroy_log_buf(tracer);
flush_workqueue(tracer->work_queue);
destroy_workqueue(tracer->work_queue);
kfree(tracer);
kvfree(tracer);
}
static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data)
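
A hedged sketch of the allocation pattern the tracer moves to (the struct name is hypothetical): kvzalloc() returns zeroed memory, trying kmalloc first and falling back to vmalloc when a physically contiguous allocation of that size is unlikely to succeed; kvfree() releases either kind.

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct big_state {
	u8 scratch[64 * 1024];	/* large enough that kmalloc may fail */
};

static struct big_state *big_state_create(void)
{
	/* must be called from a context that may sleep (GFP_KERNEL) */
	return kvzalloc(sizeof(struct big_state), GFP_KERNEL);
}

static void big_state_destroy(struct big_state *s)
{
	kvfree(s);	/* correct for kmalloc- and vmalloc-backed memory */
}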


@ -367,6 +367,7 @@ enum {
MLX5E_SQ_STATE_AM,
MLX5E_SQ_STATE_TLS,
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
};
struct mlx5e_sq_wqe_info {
@ -960,7 +961,7 @@ void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
void mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);


@ -12,6 +12,7 @@
#include <net/flow_offload.h>
#include <net/netfilter/nf_flow_table.h>
#include <linux/workqueue.h>
#include <linux/xarray.h>
#include "esw/chains.h"
#include "en/tc_ct.h"
@ -35,7 +36,7 @@ struct mlx5_tc_ct_priv {
struct mlx5_eswitch *esw;
const struct net_device *netdev;
struct idr fte_ids;
struct idr tuple_ids;
struct xarray tuple_ids;
struct rhashtable zone_ht;
struct mlx5_flow_table *ct;
struct mlx5_flow_table *ct_nat;
@ -238,7 +239,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr);
mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid);
xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid);
}
static void
@ -483,7 +484,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
struct mlx5_eswitch *esw = ct_priv->esw;
struct mlx5_flow_spec *spec = NULL;
u32 tupleid = 1;
u32 tupleid;
int err;
zone_rule->nat = nat;
@ -493,12 +494,12 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
return -ENOMEM;
/* Get tuple unique id */
err = idr_alloc_u32(&ct_priv->tuple_ids, zone_rule, &tupleid,
TUPLE_ID_MAX, GFP_KERNEL);
err = xa_alloc(&ct_priv->tuple_ids, &tupleid, zone_rule,
XA_LIMIT(1, TUPLE_ID_MAX), GFP_KERNEL);
if (err) {
netdev_warn(ct_priv->netdev,
"Failed to allocate tuple id, err: %d\n", err);
goto err_idr_alloc;
goto err_xa_alloc;
}
zone_rule->tupleid = tupleid;
@ -539,8 +540,8 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
err_rule:
mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
err_mod_hdr:
idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid);
err_idr_alloc:
xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid);
err_xa_alloc:
kfree(spec);
return err;
}
@ -1299,7 +1300,7 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
}
idr_init(&ct_priv->fte_ids);
idr_init(&ct_priv->tuple_ids);
xa_init_flags(&ct_priv->tuple_ids, XA_FLAGS_ALLOC1);
mutex_init(&ct_priv->control_lock);
rhashtable_init(&ct_priv->zone_ht, &zone_params);
@ -1334,7 +1335,7 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
rhashtable_destroy(&ct_priv->zone_ht);
mutex_destroy(&ct_priv->control_lock);
idr_destroy(&ct_priv->tuple_ids);
xa_destroy(&ct_priv->tuple_ids);
idr_destroy(&ct_priv->fte_ids);
kfree(ct_priv);
@ -1352,7 +1353,7 @@ mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
if (!ct_priv || !tupleid)
return true;
zone_rule = idr_find(&ct_priv->tuple_ids, tupleid);
zone_rule = xa_load(&ct_priv->tuple_ids, tupleid);
if (!zone_rule)
return false;
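
A hedged sketch of the IDR-to-XArray conversion applied above (the "rule" type is a stand-in): xa_alloc() hands out an unused ID within a limit, xa_load() looks an entry up, xa_erase() drops it, and the ALLOC1 flag reserves index 0 so an ID of zero can mean "none".

#include <linux/types.h>
#include <linux/xarray.h>

struct rule;

static DEFINE_XARRAY_ALLOC1(rules);	/* allocating XArray, IDs start at 1 */

static int rule_store(struct rule *r, u32 *id, u32 max_id)
{
	/* allocate an unused index in [1, max_id] and store r there */
	return xa_alloc(&rules, id, r, XA_LIMIT(1, max_id), GFP_KERNEL);
}

static struct rule *rule_lookup(u32 id)
{
	return xa_load(&rules, id);
}

static void rule_remove(u32 id)
{
	xa_erase(&rules, id);
}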


@ -33,6 +33,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state)))
return 0;
if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state))
return 0;
spin_lock(&c->xskicosq_lock);
mlx5e_trigger_irq(&c->xskicosq);
spin_unlock(&c->xskicosq_lock);


@ -3583,7 +3583,12 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
if (!mlx5e_monitor_counter_supported(priv)) {
/* In switchdev mode, monitor counters doesn't monitor
* rx/tx stats of 802_3. The update stats mechanism
* should keep the 802_3 layout counters updated
*/
if (!mlx5e_monitor_counter_supported(priv) ||
mlx5e_is_uplink_rep(priv)) {
/* update HW stats in background for next time */
mlx5e_queue_update_stats(priv);
}


@ -589,7 +589,7 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
return !!err;
}
void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
struct mlx5_cqe64 *cqe;
@ -597,11 +597,11 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
int i;
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return;
return 0;
cqe = mlx5_cqwq_get_cqe(&cq->wq);
if (likely(!cqe))
return;
return 0;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
* otherwise a cq overrun may occur
@ -650,6 +650,8 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
sq->cc = sqcc;
mlx5_cqwq_update_db_record(&cq->wq);
return i;
}
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)


@ -152,7 +152,11 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_post_rx_wqes,
rq);
if (xsk_open) {
mlx5e_poll_ico_cq(&c->xskicosq.cq);
if (mlx5e_poll_ico_cq(&c->xskicosq.cq))
/* Don't clear the flag if nothing was polled to prevent
* queueing more WQEs and overflowing XSKICOSQ.
*/
clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state);
busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq);
}


@ -380,7 +380,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
block = kzalloc(sizeof(*block), GFP_KERNEL);
if (!block)
return NULL;
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&block->resource_list);
block->afa = mlxsw_afa;
@ -408,7 +408,7 @@ err_second_set_create:
mlxsw_afa_set_destroy(block->first_set);
err_first_set_create:
kfree(block);
return NULL;
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(mlxsw_afa_block_create);


@ -88,8 +88,8 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
* to be written using PEFA register to all indexes for all regions.
*/
afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
if (!afa_block) {
err = -ENOMEM;
if (IS_ERR(afa_block)) {
err = PTR_ERR(afa_block);
goto err_afa_block;
}
err = mlxsw_afa_block_continue(afa_block);


@ -464,7 +464,7 @@ mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
if (!rulei)
return NULL;
return ERR_PTR(-ENOMEM);
if (afa_block) {
rulei->act_block = afa_block;


@ -199,8 +199,8 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
int err;
afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
if (!afa_block)
return ERR_PTR(-ENOMEM);
if (IS_ERR(afa_block))
return afa_block;
err = mlxsw_afa_block_append_allocated_counter(afa_block,
counter_index);


@ -1,19 +1,3 @@
/*
* snull.h -- definitions for the network module
*
* Copyright (C) 2001 Alessandro Rubini and Jonathan Corbet
* Copyright (C) 2001 O'Reilly & Associates
*
* The source code in this file can be freely used, adapted,
* and redistributed in source or binary form, so long as an
* acknowledgment appears in derived source files. The citation
* should list that the code comes from the book "Linux Device
* Drivers" by Alessandro Rubini and Jonathan Corbet, published
* by O'Reilly & Associates. No warranty is attached;
* we cannot take responsibility for errors or fitness for use.
*/
/* version dependencies have been confined to a separate file */
/* Tunable parameters */


@ -5,8 +5,13 @@
#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
#include "dwmac-intel.h"
#include "stmmac.h"
struct intel_priv_data {
int mdio_adhoc_addr; /* mdio address for serdes & etc */
};
/* This struct is used to associate PCI Function of MAC controller on a board,
* discovered via DMI, with the address of PHY connected to the MAC. The
* negative value of the address means that MAC controller is not connected
@ -49,6 +54,172 @@ static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
return -ENODEV;
}
static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
int phyreg, u32 mask, u32 val)
{
unsigned int retries = 10;
int val_rd;
do {
val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
if ((val_rd & mask) == (val & mask))
return 0;
udelay(POLL_DELAY_US);
} while (--retries);
return -ETIMEDOUT;
}
static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
{
struct intel_priv_data *intel_priv = priv_data;
struct stmmac_priv *priv = netdev_priv(ndev);
int serdes_phy_addr = 0;
u32 data = 0;
if (!intel_priv->mdio_adhoc_addr)
return 0;
serdes_phy_addr = intel_priv->mdio_adhoc_addr;
/* assert clk_req */
data = mdiobus_read(priv->mii, serdes_phy_addr,
SERDES_GCR0);
data |= SERDES_PLL_CLK;
mdiobus_write(priv->mii, serdes_phy_addr,
SERDES_GCR0, data);
/* check for clk_ack assertion */
data = serdes_status_poll(priv, serdes_phy_addr,
SERDES_GSR0,
SERDES_PLL_CLK,
SERDES_PLL_CLK);
if (data) {
dev_err(priv->device, "Serdes PLL clk request timeout\n");
return data;
}
/* assert lane reset */
data = mdiobus_read(priv->mii, serdes_phy_addr,
SERDES_GCR0);
data |= SERDES_RST;
mdiobus_write(priv->mii, serdes_phy_addr,
SERDES_GCR0, data);
/* check for assert lane reset reflection */
data = serdes_status_poll(priv, serdes_phy_addr,
SERDES_GSR0,
SERDES_RST,
SERDES_RST);
if (data) {
dev_err(priv->device, "Serdes assert lane reset timeout\n");
return data;
}
/* move power state to P0 */
data = mdiobus_read(priv->mii, serdes_phy_addr,
SERDES_GCR0);
data &= ~SERDES_PWR_ST_MASK;
data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;
mdiobus_write(priv->mii, serdes_phy_addr,
SERDES_GCR0, data);
/* Check for P0 state */
data = serdes_status_poll(priv, serdes_phy_addr,
SERDES_GSR0,
SERDES_PWR_ST_MASK,
SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);
if (data) {
dev_err(priv->device, "Serdes power state P0 timeout.\n");
return data;
}
return 0;
}
static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
{
struct intel_priv_data *intel_priv = intel_data;
struct stmmac_priv *priv = netdev_priv(ndev);
int serdes_phy_addr = 0;
u32 data = 0;
if (!intel_priv->mdio_adhoc_addr)
return;
serdes_phy_addr = intel_priv->mdio_adhoc_addr;
/* move power state to P3 */
data = mdiobus_read(priv->mii, serdes_phy_addr,
SERDES_GCR0);
data &= ~SERDES_PWR_ST_MASK;
data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;
mdiobus_write(priv->mii, serdes_phy_addr,
SERDES_GCR0, data);
/* Check for P3 state */
data = serdes_status_poll(priv, serdes_phy_addr,
SERDES_GSR0,
SERDES_PWR_ST_MASK,
SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);
if (data) {
dev_err(priv->device, "Serdes power state P3 timeout\n");
return;
}
/* de-assert clk_req */
data = mdiobus_read(priv->mii, serdes_phy_addr,
SERDES_GCR0);
data &= ~SERDES_PLL_CLK;
mdiobus_write(priv->mii, serdes_phy_addr,
SERDES_GCR0, data);
/* check for clk_ack de-assert */
data = serdes_status_poll(priv, serdes_phy_addr,
SERDES_GSR0,
SERDES_PLL_CLK,
(u32)~SERDES_PLL_CLK);
if (data) {
dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
return;
}
/* de-assert lane reset */
data = mdiobus_read(priv->mii, serdes_phy_addr,
SERDES_GCR0);
data &= ~SERDES_RST;
mdiobus_write(priv->mii, serdes_phy_addr,
SERDES_GCR0, data);
/* check for de-assert lane reset reflection */
data = serdes_status_poll(priv, serdes_phy_addr,
SERDES_GSR0,
SERDES_RST,
(u32)~SERDES_RST);
if (data) {
dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
return;
}
}
static void common_default_data(struct plat_stmmacenet_data *plat)
{
plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
@ -189,6 +360,9 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
plat->phy_addr = 0;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return ehl_common_data(pdev, plat);
}
@ -233,6 +407,8 @@ static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return ehl_pse0_common_data(pdev, plat);
}
@ -263,6 +439,8 @@ static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return ehl_pse1_common_data(pdev, plat);
}
@ -291,6 +469,8 @@ static int tgl_sgmii_data(struct pci_dev *pdev,
plat->bus_id = 1;
plat->phy_addr = 0;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
}
@ -417,11 +597,17 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
struct intel_priv_data *intel_priv;
struct plat_stmmacenet_data *plat;
struct stmmac_resources res;
int i;
int ret;
intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv),
GFP_KERNEL);
if (!intel_priv)
return -ENOMEM;
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
return -ENOMEM;
@ -457,6 +643,9 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
plat->bsp_priv = intel_priv;
intel_priv->mdio_adhoc_addr = 0x15;
ret = info->setup(pdev, plat);
if (ret)
return ret;


@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020, Intel Corporation
* DWMAC Intel header file
*/
#ifndef __DWMAC_INTEL_H__
#define __DWMAC_INTEL_H__
#define POLL_DELAY_US 8
/* SERDES Register */
#define SERDES_GSR0 0x5 /* Global Status Reg0 */
#define SERDES_GCR0 0xb /* Global Configuration Reg0 */
/* SERDES defines */
#define SERDES_PLL_CLK BIT(0) /* PLL clk valid signal */
#define SERDES_RST BIT(2) /* Serdes Reset */
#define SERDES_PWR_ST_MASK GENMASK(6, 4) /* Serdes Power state*/
#define SERDES_PWR_ST_SHIFT 4
#define SERDES_PWR_ST_P0 0x0
#define SERDES_PWR_ST_P3 0x3
#endif /* __DWMAC_INTEL_H__ */


@ -119,6 +119,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
{ .div = 5, .val = 5, },
{ .div = 6, .val = 6, },
{ .div = 7, .val = 7, },
{ /* end of array */ }
};
clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);


@ -291,16 +291,19 @@ static int socfpga_gen5_set_phy_mode(struct socfpga_dwmac *dwmac)
phymode == PHY_INTERFACE_MODE_MII ||
phymode == PHY_INTERFACE_MODE_GMII ||
phymode == PHY_INTERFACE_MODE_SGMII) {
ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
&module);
module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
module);
} else {
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
}
if (dwmac->f2h_ptp_ref_clk)
ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
else
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK <<
(reg_shift / 2));
regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
/* Deassert reset for the phy configuration to be sampled by


@ -27,12 +27,16 @@ static void config_sub_second_increment(void __iomem *ioaddr,
unsigned long data;
u32 reg_value;
/* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
* formula = (1/ptp_clock) * 1000000000
* where ptp_clock is 50MHz if fine method is used to update system
/* For GMAC3.x, 4.x versions, in "fine adjustment mode" set sub-second
* increment to twice the number of nanoseconds of a clock cycle.
* The calculation of the default_addend value by the caller will set it
* to mid-range = 2^31 when the remainder of this division is zero,
* which will make the accumulator overflow once every 2 ptp_clock
* cycles, adding twice the number of nanoseconds of a clock cycle :
* 2000000000ULL / ptp_clock.
*/
if (value & PTP_TCR_TSCFUPDT)
data = (1000000000ULL / 50000000);
data = (2000000000ULL / ptp_clock);
else
data = (1000000000ULL / ptp_clock);


@ -4986,6 +4986,14 @@ int stmmac_dvr_probe(struct device *device,
goto error_netdev_register;
}
if (priv->plat->serdes_powerup) {
ret = priv->plat->serdes_powerup(ndev,
priv->plat->bsp_priv);
if (ret < 0)
return ret;
}
#ifdef CONFIG_DEBUG_FS
stmmac_init_fs(ndev);
#endif
@ -5029,6 +5037,9 @@ int stmmac_dvr_remove(struct device *dev)
stmmac_stop_all_dma(priv);
if (priv->plat->serdes_powerdown)
priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
stmmac_mac_set(priv, priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
@ -5081,6 +5092,9 @@ int stmmac_suspend(struct device *dev)
/* Stop TX/RX DMA */
stmmac_stop_all_dma(priv);
if (priv->plat->serdes_powerdown)
priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
/* Enable Power down mode by programming the PMT regs */
if (device_may_wakeup(priv->device)) {
stmmac_pmt(priv, priv->hw, priv->wolopts);
@ -5143,6 +5157,7 @@ int stmmac_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
int ret;
if (!netif_running(ndev))
return 0;
@ -5170,6 +5185,14 @@ int stmmac_resume(struct device *dev)
stmmac_mdio_reset(priv->mii);
}
if (priv->plat->serdes_powerup) {
ret = priv->plat->serdes_powerup(ndev,
priv->plat->bsp_priv);
if (ret < 0)
return ret;
}
netif_device_attach(ndev);
mutex_lock(&priv->lock);


@ -1387,6 +1387,8 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
return -ENODEV;
regs_phys = res->start;
port->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(port->regs))
return PTR_ERR(port->regs);
switch (port->id) {
case IXP4XX_ETH_NPEA:


@ -1207,7 +1207,7 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
enum ifla_geneve_df df = nla_get_u8(data[IFLA_GENEVE_DF]);
if (df < 0 || df > GENEVE_DF_MAX) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_GENEVE_DF],
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_DF],
"Invalid DF attribute");
return -EINVAL;
}


@ -4002,11 +4002,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev;
int err;
sci_t sci;
u8 icv_len = DEFAULT_ICV_LEN;
rx_handler_func_t *rx_handler;
u8 icv_len = DEFAULT_ICV_LEN;
struct net_device *real_dev;
int err, mtu;
sci_t sci;
if (!tb[IFLA_LINK])
return -EINVAL;
@ -4033,7 +4033,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (data && data[IFLA_MACSEC_ICV_LEN])
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
if (mtu < 0)
dev->mtu = 0;
else
dev->mtu = mtu;
rx_handler = rtnl_dereference(real_dev->rx_handler);
if (rx_handler && rx_handler != macsec_handle_frame)


@ -1704,7 +1704,7 @@ static int macvlan_device_event(struct notifier_block *unused,
struct macvlan_dev,
list);
if (macvlan_sync_address(vlan->dev, dev->dev_addr))
if (vlan && macvlan_sync_address(vlan->dev, dev->dev_addr))
return NOTIFY_BAD;
break;


@ -155,9 +155,6 @@ static int bcm84881_read_status(struct phy_device *phydev)
if (phydev->autoneg == AUTONEG_ENABLE && !phydev->autoneg_complete)
phydev->link = false;
if (!phydev->link)
return 0;
linkmode_zero(phydev->lp_advertising);
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
@ -165,6 +162,9 @@ static int bcm84881_read_status(struct phy_device *phydev)
phydev->asym_pause = 0;
phydev->mdix = 0;
if (!phydev->link)
return 0;
if (phydev->autoneg_complete) {
val = genphy_c45_read_lpa(phydev);
if (val < 0)


@ -246,7 +246,8 @@ static int mv3310_power_up(struct phy_device *phydev)
ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
MV_V2_PORT_CTRL_PWRDOWN);
if (priv->firmware_ver < 0x00030000)
if (phydev->drv->phy_id != MARVELL_PHY_ID_88X3310 ||
priv->firmware_ver < 0x00030000)
return ret;
return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,


@ -3,9 +3,21 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/phy.h>
/* External Register Control Register */
#define LAN87XX_EXT_REG_CTL (0x14)
#define LAN87XX_EXT_REG_CTL_RD_CTL (0x1000)
#define LAN87XX_EXT_REG_CTL_WR_CTL (0x0800)
/* External Register Read Data Register */
#define LAN87XX_EXT_REG_RD_DATA (0x15)
/* External Register Write Data Register */
#define LAN87XX_EXT_REG_WR_DATA (0x16)
/* Interrupt Source Register */
#define LAN87XX_INTERRUPT_SOURCE (0x18)
@ -14,9 +26,160 @@
#define LAN87XX_MASK_LINK_UP (0x0004)
#define LAN87XX_MASK_LINK_DOWN (0x0002)
/* phyaccess nested types */
#define PHYACC_ATTR_MODE_READ 0
#define PHYACC_ATTR_MODE_WRITE 1
#define PHYACC_ATTR_MODE_MODIFY 2
#define PHYACC_ATTR_BANK_SMI 0
#define PHYACC_ATTR_BANK_MISC 1
#define PHYACC_ATTR_BANK_PCS 2
#define PHYACC_ATTR_BANK_AFE 3
#define PHYACC_ATTR_BANK_MAX 7
#define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
#define DRIVER_DESC "Microchip LAN87XX T1 PHY driver"
struct access_ereg_val {
u8 mode;
u8 bank;
u8 offset;
u16 val;
u16 mask;
};
static int access_ereg(struct phy_device *phydev, u8 mode, u8 bank,
u8 offset, u16 val)
{
u16 ereg = 0;
int rc = 0;
if (mode > PHYACC_ATTR_MODE_WRITE || bank > PHYACC_ATTR_BANK_MAX)
return -EINVAL;
if (bank == PHYACC_ATTR_BANK_SMI) {
if (mode == PHYACC_ATTR_MODE_WRITE)
rc = phy_write(phydev, offset, val);
else
rc = phy_read(phydev, offset);
return rc;
}
if (mode == PHYACC_ATTR_MODE_WRITE) {
ereg = LAN87XX_EXT_REG_CTL_WR_CTL;
rc = phy_write(phydev, LAN87XX_EXT_REG_WR_DATA, val);
if (rc < 0)
return rc;
} else {
ereg = LAN87XX_EXT_REG_CTL_RD_CTL;
}
ereg |= (bank << 8) | offset;
rc = phy_write(phydev, LAN87XX_EXT_REG_CTL, ereg);
if (rc < 0)
return rc;
if (mode == PHYACC_ATTR_MODE_READ)
rc = phy_read(phydev, LAN87XX_EXT_REG_RD_DATA);
return rc;
}
static int access_ereg_modify_changed(struct phy_device *phydev,
u8 bank, u8 offset, u16 val, u16 mask)
{
int new = 0, rc = 0;
if (bank > PHYACC_ATTR_BANK_MAX)
return -EINVAL;
rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, bank, offset, val);
if (rc < 0)
return rc;
new = val | (rc & (mask ^ 0xFFFF));
rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE, bank, offset, new);
return rc;
}
static int lan87xx_phy_init(struct phy_device *phydev)
{
static const struct access_ereg_val init[] = {
/* TX Amplitude = 5 */
{PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_AFE, 0x0B,
0x000A, 0x001E},
/* Clear SMI interrupts */
{PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI, 0x18,
0, 0},
/* Clear MISC interrupts */
{PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_MISC, 0x08,
0, 0},
/* Turn on TC10 Ring Oscillator (ROSC) */
{PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_MISC, 0x20,
0x0020, 0x0020},
/* WUR Detect Length to 1.2uS, LPC Detect Length to 1.09uS */
{PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_PCS, 0x20,
0x283C, 0},
/* Wake_In Debounce Length to 39uS, Wake_Out Length to 79uS */
{PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x21,
0x274F, 0},
/* Enable Auto Wake Forward to Wake_Out, ROSC on, Sleep,
* and Wake_In to wake PHY
*/
{PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x20,
0x80A7, 0},
/* Enable WUP Auto Fwd, Enable Wake on MDI, Wakeup Debouncer
* to 128 uS
*/
{PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x24,
0xF110, 0},
/* Enable HW Init */
{PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_SMI, 0x1A,
0x0100, 0x0100},
};
int rc, i;
/* Start manual initialization procedures in Managed Mode */
rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
0x1a, 0x0000, 0x0100);
if (rc < 0)
return rc;
/* Soft Reset the SMI block */
rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
0x00, 0x8000, 0x8000);
if (rc < 0)
return rc;
/* Check to see if the self-clearing bit is cleared */
usleep_range(1000, 2000);
rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
PHYACC_ATTR_BANK_SMI, 0x00, 0);
if (rc < 0)
return rc;
if ((rc & 0x8000) != 0)
return -ETIMEDOUT;
/* PHY Initialization */
for (i = 0; i < ARRAY_SIZE(init); i++) {
if (init[i].mode == PHYACC_ATTR_MODE_MODIFY) {
rc = access_ereg_modify_changed(phydev, init[i].bank,
init[i].offset,
init[i].val,
init[i].mask);
} else {
rc = access_ereg(phydev, init[i].mode, init[i].bank,
init[i].offset, init[i].val);
}
if (rc < 0)
return rc;
}
return 0;
}
static int lan87xx_phy_config_intr(struct phy_device *phydev)
{
int rc, val = 0;
@ -40,6 +203,13 @@ static int lan87xx_phy_ack_interrupt(struct phy_device *phydev)
return rc < 0 ? rc : 0;
}
static int lan87xx_config_init(struct phy_device *phydev)
{
int rc = lan87xx_phy_init(phydev);
return rc < 0 ? rc : 0;
}
static struct phy_driver microchip_t1_phy_driver[] = {
{
.phy_id = 0x0007c150,
@ -48,6 +218,7 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.features = PHY_BASIC_T1_FEATURES,
.config_init = lan87xx_config_init,
.config_aneg = genphy_config_aneg,
.ack_interrupt = lan87xx_phy_ack_interrupt,


@ -468,6 +468,9 @@ static const struct team_mode *team_mode_get(const char *kind)
struct team_mode_item *mitem;
const struct team_mode *mode = NULL;
if (!try_module_get(THIS_MODULE))
return NULL;
spin_lock(&mode_list_lock);
mitem = __find_mode(kind);
if (!mitem) {
@ -483,6 +486,7 @@ static const struct team_mode *team_mode_get(const char *kind)
}
spin_unlock(&mode_list_lock);
module_put(THIS_MODULE);
return mode;
}


@ -188,8 +188,8 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
fl6.flowi6_proto = iph->nexthdr;
fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
dst = ip6_route_output(net, NULL, &fl6);
if (dst == dst_null)
dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
if (IS_ERR(dst) || dst == dst_null)
goto err;
skb_dst_drop(skb);
@ -474,7 +474,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
return skb;
if (qdisc_tx_is_default(vrf_dev))
if (qdisc_tx_is_default(vrf_dev) ||
IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
return vrf_ip6_out_direct(vrf_dev, sk, skb);
return vrf_ip6_out_redirect(vrf_dev, skb);
@ -686,7 +687,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
ipv4_is_lbcast(ip_hdr(skb)->daddr))
return skb;
if (qdisc_tx_is_default(vrf_dev))
if (qdisc_tx_is_default(vrf_dev) ||
IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
return vrf_ip_out_direct(vrf_dev, sk, skb);
return vrf_ip_out_redirect(vrf_dev, skb);


@ -3144,7 +3144,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
if (id >= VXLAN_N_VID) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_ID],
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_ID],
"VXLAN ID must be lower than 16777216");
return -ERANGE;
}
@ -3155,7 +3155,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
if (ntohs(p->high) < ntohs(p->low)) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT_RANGE],
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_PORT_RANGE],
"Invalid source port range");
return -EINVAL;
}
@ -3165,7 +3165,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
enum ifla_vxlan_df df = nla_get_u8(data[IFLA_VXLAN_DF]);
if (df < 0 || df > VXLAN_DF_MAX) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_DF],
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_DF],
"Invalid DF attribute");
return -EINVAL;
}
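
A hedged sketch of the distinction the geneve/vxlan fixes correct (the surrounding function is illustrative, the attribute names are the real vxlan ones): in an rtnl_link_ops->validate() callback, tb[] carries the generic IFLA_* attributes while data[] carries the kind-specific IFLA_VXLAN_* ones, so the extack has to point at the attribute actually taken from data[].

#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <net/vxlan.h>

static int example_validate(struct nlattr *tb[], struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	if (!data || !data[IFLA_VXLAN_ID])
		return 0;

	if (nla_get_u32(data[IFLA_VXLAN_ID]) >= VXLAN_N_VID) {
		/* point at the offending kind-specific attribute:
		 * data[IFLA_VXLAN_ID], not tb[IFLA_VXLAN_ID]
		 */
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_ID],
				    "VXLAN ID must be lower than 16777216");
		return -ERANGE;
	}
	return 0;
}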


@ -354,6 +354,7 @@ out:
usb_autopm_put_interface(i2400mu->usb_iface);
d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %ld\n",
i2400m, ack, ack_size, (long) result);
usb_put_urb(&notif_urb);
return result;
error_exceeded:


@ -374,7 +374,7 @@ out:
}
static void *
il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
il3945_rs_alloc(struct ieee80211_hw *hw)
{
return hw->priv;
}


@ -2474,7 +2474,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
}
static void *
il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
il4965_rs_alloc(struct ieee80211_hw *hw)
{
return hw->priv;
}


@ -3019,7 +3019,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
cpu_to_le16(priv->lib->bt_params->agg_time_limit);
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
static void *rs_alloc(struct ieee80211_hw *hw)
{
return hw->priv;
}


@ -296,9 +296,14 @@ int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
if (!prof->enabled) {
IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
profs[i]);
/* if one of the profiles is disabled, we fail all */
return -ENOENT;
/*
* if one of the profiles is disabled, we
* ignore all of them and return 1 to
* differentiate disabled from other failures.
*/
return 1;
}
IWL_DEBUG_INFO(fwrt,
"SAR EWRD: chain %d profile index %d\n",
i, profs[i]);


@ -8,7 +8,7 @@
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2019 Intel Corporation
* Copyright(c) 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -31,7 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2019 Intel Corporation
* Copyright(c) 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -99,7 +99,7 @@ enum iwl_mvm_dqa_txq {
IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9,
IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
IWL_MVM_DQA_MAX_DATA_QUEUE = 30,
};
enum iwl_mvm_tx_fifo {


@ -1467,7 +1467,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
kmemdup(pieces->dbg_conf_tlv[i],
pieces->dbg_conf_tlv_len[i],
GFP_KERNEL);
if (!pieces->dbg_conf_tlv[i])
if (!drv->fw.dbg.conf_tlv[i])
goto out_free_fw;
}
}


@ -532,8 +532,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
.mac_cap_info[2] =
IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
IEEE80211_HE_MAC_CAP2_ACK_EN,
IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
@ -617,8 +616,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
.mac_cap_info[2] =
IEEE80211_HE_MAC_CAP2_BSR |
IEEE80211_HE_MAC_CAP2_ACK_EN,
IEEE80211_HE_MAC_CAP2_BSR,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,


@ -727,6 +727,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
struct iwl_dev_tx_power_cmd_v4 v4;
} cmd;
int ret;
u16 len = 0;
cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);
@ -741,9 +742,14 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
len = sizeof(cmd.v4.v3);
if (iwl_sar_select_profile(&mvm->fwrt, cmd.v5.v3.per_chain_restriction,
prof_a, prof_b))
return -ENOENT;
ret = iwl_sar_select_profile(&mvm->fwrt,
cmd.v5.v3.per_chain_restriction,
prof_a, prof_b);
/* return on error or if the profile is disabled (positive number) */
if (ret)
return ret;
IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
@ -1034,16 +1040,7 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
ret);
ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
/*
* If we don't have profile 0 from BIOS, just skip it. This
* means that SAR Geo will not be enabled either, even if we
* have other valid profiles.
*/
if (ret == -ENOENT)
return 1;
return ret;
return iwl_mvm_sar_select_profile(mvm, 1, 1);
}
static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
@ -1272,7 +1269,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_sar_init(mvm);
if (ret == 0) {
ret = iwl_mvm_sar_geo_init(mvm);
} else if (ret > 0 && !iwl_sar_get_wgds_table(&mvm->fwrt)) {
} else if (ret == -ENOENT && !iwl_sar_get_wgds_table(&mvm->fwrt)) {
/*
* If basic SAR is not available, we check for WGDS,
* which should *not* be available either. If it is


@ -3665,7 +3665,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta));
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
static void *rs_alloc(struct ieee80211_hw *hw)
{
return hw->priv;
}


@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -566,6 +566,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_mvm_stat_data {
struct iwl_mvm *mvm;
__le32 flags;
__le32 mac_id;
u8 beacon_filter_average_energy;
void *general;
@ -606,6 +607,13 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
-general->beacon_average_energy[vif_id];
}
/* make sure that beacon statistics don't go backwards with TCM
* request to clear statistics
*/
if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
mvmvif->beacon_stats.accu_num_beacons +=
mvmvif->beacon_stats.num_beacons;
if (mvmvif->id != id)
return;
@ -763,6 +771,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
flags = stats->flag;
}
data.flags = flags;
iwl_mvm_rx_stats_check_trigger(mvm, pkt);


@ -722,6 +722,11 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
lockdep_assert_held(&mvm->mutex);
if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
"max queue %d >= num_of_queues (%d)", maxq,
mvm->trans->trans_cfg->base_params->num_of_queues))
maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;
/* This should not be hit with new TX path */
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -ENOSPC;
@ -1164,9 +1169,9 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
inactive_tid_bitmap,
&unshare_queues,
&changetid_queues);
if (ret >= 0 && free_queue < 0) {
if (ret && free_queue < 0) {
queue_owner = sta;
free_queue = ret;
free_queue = i;
}
/* only unlock sta lock - we still need the queue info lock */
spin_unlock_bh(&mvmsta->lock);


@ -129,6 +129,18 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);
switch (trans_pcie->rx_buf_size) {
case IWL_AMSDU_DEF:
return -EINVAL;
case IWL_AMSDU_2K:
break;
case IWL_AMSDU_4K:
case IWL_AMSDU_8K:
case IWL_AMSDU_12K:
control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
break;
}
/* Allocate prph scratch */
prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
&trans_pcie->prph_scratch_dma_addr,
@ -143,10 +155,8 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
IWL_PRPH_SCRATCH_MTR_MODE |
(IWL_PRPH_MTR_FORMAT_256B &
IWL_PRPH_SCRATCH_MTR_FORMAT);
control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;
/* initialize RX default queue */
prph_sc_ctrl->rbd_cfg.free_rbd_addr =


@ -1418,6 +1418,9 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
iwl_pcie_gen2_txq_unmap(trans, queue);
iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]);
trans_pcie->txq[queue] = NULL;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}


@ -4068,7 +4068,7 @@ static void hwsim_virtio_rx_work(struct work_struct *work)
}
vq = hwsim_vqs[HWSIM_VQ_RX];
sg_init_one(sg, skb->head, skb_end_offset(skb));
err = virtqueue_add_inbuf(vq, sg, 1, skb, GFP_KERNEL);
err = virtqueue_add_inbuf(vq, sg, 1, skb, GFP_ATOMIC);
if (WARN(err, "virtqueue_add_inbuf returned %d\n", err))
nlmsg_free(skb);
else


@ -261,7 +261,7 @@ static void rtl_rate_update(void *ppriv,
{
}
static void *rtl_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
static void *rtl_rate_alloc(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
return rtlpriv;


@ -400,8 +400,8 @@ static int ines_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
ines_write32(port, ts_stat_rx, ts_stat_rx);
ines_write32(port, ts_stat_tx, ts_stat_tx);
port->rxts_enabled = ts_stat_rx == TS_ENABLE ? true : false;
port->txts_enabled = ts_stat_tx == TS_ENABLE ? true : false;
port->rxts_enabled = ts_stat_rx == TS_ENABLE;
port->txts_enabled = ts_stat_tx == TS_ENABLE;
spin_unlock_irqrestore(&port->lock, flags);


@ -177,6 +177,8 @@ struct plat_stmmacenet_data {
struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES];
void (*fix_mac_speed)(void *priv, unsigned int speed);
int (*serdes_powerup)(struct net_device *ndev, void *priv);
void (*serdes_powerdown)(struct net_device *ndev, void *priv);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
struct mac_device_info *(*setup)(void *priv);


@ -6007,7 +6007,9 @@ enum rate_control_capabilities {
struct rate_control_ops {
unsigned long capa;
const char *name;
void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir);
void *(*alloc)(struct ieee80211_hw *hw);
void (*add_debugfs)(struct ieee80211_hw *hw, void *priv,
struct dentry *debugfsdir);
void (*free)(void *priv);
void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp);


@ -41,7 +41,7 @@ enum {
ND_OPT_DNSSL = 31, /* RFC6106 */
ND_OPT_6CO = 34, /* RFC6775 */
ND_OPT_CAPTIVE_PORTAL = 37, /* RFC7710 */
ND_OPT_PREF64 = 38, /* RFC-ietf-6man-ra-pref64-09 */
ND_OPT_PREF64 = 38, /* RFC8781 */
__ND_OPT_MAX
};


@ -51,7 +51,7 @@ extern struct inet_hashinfo tcp_hashinfo;
extern struct percpu_counter tcp_orphan_count;
void tcp_time_wait(struct sock *sk, int state, int timeo);
#define MAX_TCP_HEADER (128 + MAX_HEADER)
#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS 48
#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
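
Editor's note: L1_CACHE_ALIGN() rounds its argument up to the next multiple of the L1 cache-line size, so the MAX_TCP_HEADER reservation now ends on a cache-line boundary. Below is a minimal userspace sketch of the same round-up arithmetic; CACHE_BYTES and the sample MAX_HEADER value are illustrative stand-ins, not the kernel's constants.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's L1_CACHE_BYTES / L1_CACHE_ALIGN(). */
#define CACHE_BYTES	64
#define CACHE_ALIGN(x)	(((x) + CACHE_BYTES - 1) & ~(CACHE_BYTES - 1))

int main(void)
{
	int max_header = 160;	/* hypothetical MAX_HEADER value for this example */

	printf("%d -> %d\n", 128 + max_header, CACHE_ALIGN(128 + max_header));
	/* prints "288 -> 320": the reservation is padded to a cache-line multiple */
	return 0;
}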


@ -1642,7 +1642,7 @@ union bpf_attr {
* ifindex, but doesn't require a map to do so.
* Return
* **XDP_REDIRECT** on success, or the value of the two lower bits
* of the **flags* argument on error.
* of the *flags* argument on error.
*
* int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
* Description


@ -469,7 +469,7 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
return -EOVERFLOW;
/* Make sure CPU is a valid possible cpu */
if (!cpu_possible(key_cpu))
if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
return -ENODEV;
if (qsize == 0) {


@ -2283,7 +2283,7 @@ static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
}
#endif
const struct file_operations bpf_link_fops = {
static const struct file_operations bpf_link_fops = {
#ifdef CONFIG_PROC_FS
.show_fdinfo = bpf_link_show_fdinfo,
#endif
@ -3628,8 +3628,10 @@ static int link_update(union bpf_attr *attr)
return PTR_ERR(link);
new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
if (IS_ERR(new_prog))
return PTR_ERR(new_prog);
if (IS_ERR(new_prog)) {
ret = PTR_ERR(new_prog);
goto out_put_link;
}
if (flags & BPF_F_REPLACE) {
old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
@ -3638,6 +3640,9 @@ static int link_update(union bpf_attr *attr)
old_prog = NULL;
goto out_put_progs;
}
} else if (attr->link_update.old_prog_fd) {
ret = -EINVAL;
goto out_put_progs;
}
#ifdef CONFIG_CGROUP_BPF
@ -3653,6 +3658,8 @@ out_put_progs:
bpf_prog_put(old_prog);
if (ret)
bpf_prog_put(new_prog);
out_put_link:
bpf_link_put(link);
return ret;
}


@ -2118,6 +2118,15 @@ static bool register_is_const(struct bpf_reg_state *reg)
return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
}
static bool __is_pointer_value(bool allow_ptr_leaks,
const struct bpf_reg_state *reg)
{
if (allow_ptr_leaks)
return false;
return reg->type != SCALAR_VALUE;
}
static void save_register_state(struct bpf_func_state *state,
int spi, struct bpf_reg_state *reg)
{
@ -2308,6 +2317,16 @@ static int check_stack_read(struct bpf_verifier_env *env,
* which resets stack/reg liveness for state transitions
*/
state->regs[value_regno].live |= REG_LIVE_WRITTEN;
} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
/* If value_regno==-1, the caller is asking us whether
* it is acceptable to use this value as a SCALAR_VALUE
* (e.g. for XADD).
* We must not allow unprivileged callers to do that
* with spilled pointers.
*/
verbose(env, "leaking pointer from stack off %d\n",
off);
return -EACCES;
}
mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
} else {
@ -2673,15 +2692,6 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
return -EACCES;
}
static bool __is_pointer_value(bool allow_ptr_leaks,
const struct bpf_reg_state *reg)
{
if (allow_ptr_leaks)
return false;
return reg->type != SCALAR_VALUE;
}
static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
{
return cur_regs(env) + regno;
@ -3089,7 +3099,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
if (ret < 0)
return ret;
if (atype == BPF_READ) {
if (atype == BPF_READ && value_regno >= 0) {
if (ret == SCALAR_VALUE) {
mark_reg_unknown(env, regs, value_regno);
return 0;
@ -10487,6 +10497,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
return -EINVAL;
}
env->ops = bpf_verifier_ops[tgt_prog->type];
prog->expected_attach_type = tgt_prog->expected_attach_type;
}
if (!tgt_prog->jited) {
verbose(env, "Can attach to only JITed progs\n");
@ -10831,6 +10842,13 @@ err_release_maps:
* them now. Otherwise free_used_maps() will release them.
*/
release_maps(env);
/* extension progs temporarily inherit the attach_type of their targets
for verification purposes, so set it back to zero before returning
*/
if (env->prog->type == BPF_PROG_TYPE_EXT)
env->prog->expected_attach_type = 0;
*prog = env->prog;
err_unlock:
if (!is_priv)


@ -127,10 +127,8 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
cs->classid = (u32)value;
css_task_iter_start(css, 0, &it);
while ((p = css_task_iter_next(&it))) {
while ((p = css_task_iter_next(&it)))
update_classid_task(p, cs->classid);
cond_resched();
}
css_task_iter_end(&it);
return 0;


@ -1770,11 +1770,9 @@ int dsa_slave_create(struct dsa_port *port)
rtnl_lock();
ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
rtnl_unlock();
if (ret && ret != -EOPNOTSUPP) {
dev_err(ds->dev, "error %d setting MTU on port %d\n",
ret, port->index);
goto out_free;
}
if (ret)
dev_warn(ds->dev, "nonfatal error %d setting MTU on port %d\n",
ret, port->index);
netif_carrier_off(slave_dev);


@ -2014,7 +2014,7 @@ static void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
struct fib_info *next_fi = fa->fa_info;
struct fib_nh *nh;
struct fib_nh_common *nhc;
if (fa->fa_slen != slen)
continue;
@ -2037,8 +2037,8 @@ static void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
fa->fa_type != RTN_UNICAST)
continue;
nh = fib_info_nh(next_fi, 0);
if (!nh->fib_nh_gw4 || nh->fib_nh_scope != RT_SCOPE_LINK)
nhc = fib_info_nhc(next_fi, 0);
if (!nhc->nhc_gw_family || nhc->nhc_scope != RT_SCOPE_LINK)
continue;
fib_alias_accessed(fa);


@ -58,9 +58,7 @@ int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb)
{
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
#ifdef CONFIG_NETFILTER
IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
#endif
return xfrm_output(sk, skb);
}


@ -183,15 +183,14 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
retv = -EBUSY;
break;
}
} else if (sk->sk_protocol == IPPROTO_TCP) {
if (sk->sk_prot != &tcpv6_prot) {
retv = -EBUSY;
break;
}
break;
} else {
}
if (sk->sk_protocol == IPPROTO_TCP &&
sk->sk_prot != &tcpv6_prot) {
retv = -EBUSY;
break;
}
if (sk->sk_protocol != IPPROTO_TCP)
break;
if (sk->sk_state != TCP_ESTABLISHED) {
retv = -ENOTCONN;
break;


@ -8,6 +8,7 @@
#include <net/rpl.h>
#define IPV6_PFXTAIL_LEN(x) (sizeof(struct in6_addr) - (x))
#define IPV6_RPL_BEST_ADDR_COMPRESSION 15
static void ipv6_rpl_addr_decompress(struct in6_addr *dst,
const struct in6_addr *daddr,
@ -73,7 +74,7 @@ static unsigned char ipv6_rpl_srh_calc_cmpri(const struct ipv6_rpl_sr_hdr *inhdr
}
}
return plen;
return IPV6_RPL_BEST_ADDR_COMPRESSION;
}
static unsigned char ipv6_rpl_srh_calc_cmpre(const struct in6_addr *daddr,
@ -83,10 +84,10 @@ static unsigned char ipv6_rpl_srh_calc_cmpre(const struct in6_addr *daddr,
for (plen = 0; plen < sizeof(*daddr); plen++) {
if (daddr->s6_addr[plen] != last_segment->s6_addr[plen])
break;
return plen;
}
return plen;
return IPV6_RPL_BEST_ADDR_COMPRESSION;
}
void ipv6_rpl_srh_compress(struct ipv6_rpl_sr_hdr *outhdr,


@ -111,9 +111,7 @@ int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb)
{
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
#ifdef CONFIG_NETFILTER
IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
#endif
return xfrm_output(sk, skb);
}


@ -1183,8 +1183,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom,
IEEE80211_TX_STATUS_HEADROOM);
debugfs_hw_add(local);
/*
* if the driver doesn't specify a max listen interval we
* use 5 which should be a safe default
@ -1273,6 +1271,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (result < 0)
goto fail_wiphy_register;
debugfs_hw_add(local);
rate_control_add_debugfs(local);
rtnl_lock();
/* add one default STA interface if supported */


@ -214,17 +214,16 @@ static ssize_t rcname_read(struct file *file, char __user *userbuf,
ref->ops->name, len);
}
static const struct file_operations rcname_ops = {
const struct file_operations rcname_ops = {
.read = rcname_read,
.open = simple_open,
.llseek = default_llseek,
};
#endif
static struct rate_control_ref *rate_control_alloc(const char *name,
struct ieee80211_local *local)
static struct rate_control_ref *
rate_control_alloc(const char *name, struct ieee80211_local *local)
{
struct dentry *debugfsdir = NULL;
struct rate_control_ref *ref;
ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL);
@ -234,13 +233,7 @@ static struct rate_control_ref *rate_control_alloc(const char *name,
if (!ref->ops)
goto free;
#ifdef CONFIG_MAC80211_DEBUGFS
debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir);
local->debugfs.rcdir = debugfsdir;
debugfs_create_file("name", 0400, debugfsdir, ref, &rcname_ops);
#endif
ref->priv = ref->ops->alloc(&local->hw, debugfsdir);
ref->priv = ref->ops->alloc(&local->hw);
if (!ref->priv)
goto free;
return ref;


@ -60,6 +60,29 @@ static inline void rate_control_add_sta_debugfs(struct sta_info *sta)
#endif
}
extern const struct file_operations rcname_ops;
static inline void rate_control_add_debugfs(struct ieee80211_local *local)
{
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *debugfsdir;
if (!local->rate_ctrl)
return;
if (!local->rate_ctrl->ops->add_debugfs)
return;
debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir);
local->debugfs.rcdir = debugfsdir;
debugfs_create_file("name", 0400, debugfsdir,
local->rate_ctrl, &rcname_ops);
local->rate_ctrl->ops->add_debugfs(&local->hw, local->rate_ctrl->priv,
debugfsdir);
#endif
}
void ieee80211_check_rate_mask(struct ieee80211_sub_if_data *sdata);
/* Get a reference to the rate control algorithm. If `name' is NULL, get the


@ -1635,7 +1635,7 @@ minstrel_ht_init_cck_rates(struct minstrel_priv *mp)
}
static void *
minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
minstrel_ht_alloc(struct ieee80211_hw *hw)
{
struct minstrel_priv *mp;
@ -1673,7 +1673,17 @@ minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
mp->update_interval = HZ / 10;
mp->new_avg = true;
minstrel_ht_init_cck_rates(mp);
return mp;
}
#ifdef CONFIG_MAC80211_DEBUGFS
static void minstrel_ht_add_debugfs(struct ieee80211_hw *hw, void *priv,
struct dentry *debugfsdir)
{
struct minstrel_priv *mp = priv;
mp->fixed_rate_idx = (u32) -1;
debugfs_create_u32("fixed_rate_idx", S_IRUGO | S_IWUGO, debugfsdir,
&mp->fixed_rate_idx);
@ -1681,12 +1691,8 @@ minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
&mp->sample_switch);
debugfs_create_bool("new_avg", S_IRUGO | S_IWUSR, debugfsdir,
&mp->new_avg);
#endif
minstrel_ht_init_cck_rates(mp);
return mp;
}
#endif
static void
minstrel_ht_free(void *priv)
@ -1725,6 +1731,7 @@ static const struct rate_control_ops mac80211_minstrel_ht = {
.alloc = minstrel_ht_alloc,
.free = minstrel_ht_free,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_debugfs = minstrel_ht_add_debugfs,
.add_sta_debugfs = minstrel_ht_add_sta_debugfs,
#endif
.get_expected_throughput = minstrel_ht_get_expected_throughput,


@ -231,7 +231,8 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta;
int i = 0;
list_for_each_entry_rcu(sta, &local->sta_list, list) {
list_for_each_entry_rcu(sta, &local->sta_list, list,
lockdep_is_held(&local->sta_mtx)) {
if (sdata != sta->sdata)
continue;
if (i < idx) {


@ -876,12 +876,11 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
mpext->data_seq = mp_opt->data_seq;
mpext->subflow_seq = mp_opt->subflow_seq;
mpext->dsn64 = mp_opt->dsn64;
mpext->data_fin = mp_opt->data_fin;
}
mpext->data_len = mp_opt->data_len;
mpext->use_map = 1;
}
mpext->data_fin = mp_opt->data_fin;
}
void mptcp_write_options(__be32 *ptr, struct mptcp_out_options *opts)


@ -599,12 +599,14 @@ static int mptcp_nl_fill_addr(struct sk_buff *skb,
nla_put_s32(skb, MPTCP_PM_ADDR_ATTR_IF_IDX, entry->ifindex))
goto nla_put_failure;
if (addr->family == AF_INET)
nla_put_in_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR4,
addr->addr.s_addr);
if (addr->family == AF_INET &&
nla_put_in_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR4,
addr->addr.s_addr))
goto nla_put_failure;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
else if (addr->family == AF_INET6)
nla_put_in6_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR6, &addr->addr6);
else if (addr->family == AF_INET6 &&
nla_put_in6_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR6, &addr->addr6))
goto nla_put_failure;
#endif
nla_nest_end(skb, attr);
return 0;


@ -1332,7 +1332,9 @@ static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
}
#endif
struct sock *mptcp_sk_clone(const struct sock *sk, struct request_sock *req)
struct sock *mptcp_sk_clone(const struct sock *sk,
const struct tcp_options_received *opt_rx,
struct request_sock *req)
{
struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
@ -1355,26 +1357,30 @@ struct sock *mptcp_sk_clone(const struct sock *sk, struct request_sock *req)
msk->subflow = NULL;
if (unlikely(mptcp_token_new_accept(subflow_req->token, nsk))) {
nsk->sk_state = TCP_CLOSE;
bh_unlock_sock(nsk);
/* we can't call into mptcp_close() here - possible BH context
* free the sock directly
* free the sock directly.
* sk_clone_lock() sets nsk refcnt to two, hence call sk_free()
* too.
*/
nsk->sk_prot->destroy(nsk);
sk_common_release(nsk);
sk_free(nsk);
return NULL;
}
msk->write_seq = subflow_req->idsn + 1;
atomic64_set(&msk->snd_una, msk->write_seq);
if (subflow_req->remote_key_valid) {
if (opt_rx->mptcp.mp_capable) {
msk->can_ack = true;
msk->remote_key = subflow_req->remote_key;
msk->remote_key = opt_rx->mptcp.sndr_key;
mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
ack_seq++;
msk->ack_seq = ack_seq;
}
sock_reset_flag(nsk, SOCK_RCU_FREE);
/* will be fully established after successful MPC subflow creation */
inet_sk_state_store(nsk, TCP_SYN_RECV);
bh_unlock_sock(nsk);
@ -1431,6 +1437,7 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
newsk = new_mptcp_sock;
mptcp_copy_inaddrs(newsk, ssk);
list_add(&subflow->node, &msk->conn_list);
inet_sk_state_store(newsk, TCP_ESTABLISHED);
bh_unlock_sock(new_mptcp_sock);
@ -1775,6 +1782,8 @@ static int mptcp_listen(struct socket *sock, int backlog)
goto unlock;
}
sock_set_flag(sock->sk, SOCK_RCU_FREE);
err = ssock->ops->listen(ssock, backlog);
inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
if (!err)


@ -206,12 +206,10 @@ struct mptcp_subflow_request_sock {
struct tcp_request_sock sk;
u16 mp_capable : 1,
mp_join : 1,
backup : 1,
remote_key_valid : 1;
backup : 1;
u8 local_id;
u8 remote_id;
u64 local_key;
u64 remote_key;
u64 idsn;
u32 token;
u32 ssn_offset;
@ -332,7 +330,9 @@ void mptcp_proto_init(void);
int mptcp_proto_v6_init(void);
#endif
struct sock *mptcp_sk_clone(const struct sock *sk, struct request_sock *req);
struct sock *mptcp_sk_clone(const struct sock *sk,
const struct tcp_options_received *opt_rx,
struct request_sock *req);
void mptcp_get_options(const struct sk_buff *skb,
struct tcp_options_received *opt_rx);


@ -133,7 +133,6 @@ static void subflow_init_req(struct request_sock *req,
subflow_req->mp_capable = 0;
subflow_req->mp_join = 0;
subflow_req->remote_key_valid = 0;
#ifdef CONFIG_TCP_MD5SIG
/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
@ -347,6 +346,46 @@ static bool subflow_hmac_valid(const struct request_sock *req,
return ret;
}
static void mptcp_sock_destruct(struct sock *sk)
{
/* if new mptcp socket isn't accepted, it is free'd
* from the tcp listener sockets request queue, linked
* from req->sk. The tcp socket is released.
* This calls the ULP release function which will
* also remove the mptcp socket, via
* sock_put(ctx->conn).
*
* Problem is that the mptcp socket will not be in
* SYN_RECV state and doesn't have SOCK_DEAD flag.
* Both result in warnings from inet_sock_destruct.
*/
if (sk->sk_state == TCP_SYN_RECV) {
sk->sk_state = TCP_CLOSE;
WARN_ON_ONCE(sk->sk_socket);
sock_orphan(sk);
}
inet_sock_destruct(sk);
}
static void mptcp_force_close(struct sock *sk)
{
inet_sk_state_store(sk, TCP_CLOSE);
sk_common_release(sk);
}
static void subflow_ulp_fallback(struct sock *sk,
struct mptcp_subflow_context *old_ctx)
{
struct inet_connection_sock *icsk = inet_csk(sk);
mptcp_subflow_tcp_fallback(sk, old_ctx);
icsk->icsk_ulp_ops = NULL;
rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
tcp_sk(sk)->is_mptcp = 0;
}
static struct sock *subflow_syn_recv_sock(const struct sock *sk,
struct sk_buff *skb,
struct request_sock *req,
@ -359,10 +398,12 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
struct tcp_options_received opt_rx;
bool fallback_is_fatal = false;
struct sock *new_msk = NULL;
bool fallback = false;
struct sock *child;
pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
opt_rx.mptcp.mp_capable = 0;
if (tcp_rsk(req)->is_mptcp == 0)
goto create_child;
@ -377,20 +418,16 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
goto create_msk;
}
opt_rx.mptcp.mp_capable = 0;
mptcp_get_options(skb, &opt_rx);
if (opt_rx.mptcp.mp_capable) {
subflow_req->remote_key = opt_rx.mptcp.sndr_key;
subflow_req->remote_key_valid = 1;
} else {
subflow_req->mp_capable = 0;
if (!opt_rx.mptcp.mp_capable) {
fallback = true;
goto create_child;
}
create_msk:
new_msk = mptcp_sk_clone(listener->conn, req);
new_msk = mptcp_sk_clone(listener->conn, &opt_rx, req);
if (!new_msk)
subflow_req->mp_capable = 0;
fallback = true;
} else if (subflow_req->mp_join) {
fallback_is_fatal = true;
opt_rx.mptcp.mp_join = 0;
@ -409,12 +446,18 @@ create_child:
if (child && *own_req) {
struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);
/* we have null ctx on TCP fallback, which is fatal on
* MPJ handshake
/* we need to fallback on ctx allocation failure and on pre-reqs
* checking above. In the latter scenario we additionally need
* to reset the context to non MPTCP status.
*/
if (!ctx) {
if (!ctx || fallback) {
if (fallback_is_fatal)
goto close_child;
if (ctx) {
subflow_ulp_fallback(child, ctx);
kfree_rcu(ctx, rcu);
}
goto out;
}
@ -422,10 +465,17 @@ create_child:
/* new mpc subflow takes ownership of the newly
* created mptcp socket
*/
inet_sk_state_store(new_msk, TCP_ESTABLISHED);
new_msk->sk_destruct = mptcp_sock_destruct;
mptcp_pm_new_connection(mptcp_sk(new_msk), 1);
ctx->conn = new_msk;
new_msk = NULL;
/* with OoO packets we can reach here without ingress
* mpc option
*/
ctx->remote_key = opt_rx.mptcp.sndr_key;
ctx->fully_established = opt_rx.mptcp.mp_capable;
ctx->can_ack = opt_rx.mptcp.mp_capable;
} else if (ctx->mp_join) {
struct mptcp_sock *owner;
@ -444,7 +494,14 @@ create_child:
out:
/* dispose of the left over mptcp master, if any */
if (unlikely(new_msk))
sock_put(new_msk);
mptcp_force_close(new_msk);
/* check for expected invariant - should never trigger, just help
* catching earlier subtle bugs
*/
WARN_ON_ONCE(*own_req && child && tcp_sk(child)->is_mptcp &&
(!mptcp_subflow_ctx(child) ||
!mptcp_subflow_ctx(child)->conn));
return child;
close_child:
@ -1047,17 +1104,6 @@ static void subflow_ulp_release(struct sock *sk)
kfree_rcu(ctx, rcu);
}
static void subflow_ulp_fallback(struct sock *sk,
struct mptcp_subflow_context *old_ctx)
{
struct inet_connection_sock *icsk = inet_csk(sk);
mptcp_subflow_tcp_fallback(sk, old_ctx);
icsk->icsk_ulp_ops = NULL;
rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
tcp_sk(sk)->is_mptcp = 0;
}
static void subflow_ulp_clone(const struct request_sock *req,
struct sock *newsk,
const gfp_t priority)
@ -1091,9 +1137,6 @@ static void subflow_ulp_clone(const struct request_sock *req,
* is fully established only after we receive the remote key
*/
new_ctx->mp_capable = 1;
new_ctx->fully_established = subflow_req->remote_key_valid;
new_ctx->can_ack = subflow_req->remote_key_valid;
new_ctx->remote_key = subflow_req->remote_key;
new_ctx->local_key = subflow_req->local_key;
new_ctx->token = subflow_req->token;
new_ctx->ssn_offset = subflow_req->ssn_offset;


@ -421,10 +421,12 @@ void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
down_write(&flow_table->flow_block_lock);
block_cb = flow_block_cb_lookup(block, cb, cb_priv);
if (block_cb)
if (block_cb) {
list_del(&block_cb->list);
else
flow_block_cb_free(block_cb);
} else {
WARN_ON(true);
}
up_write(&flow_table->flow_block_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb);

View File

@ -1035,8 +1035,8 @@ int nf_nat_inet_register_fn(struct net *net, const struct nf_hook_ops *ops)
ret = nf_nat_register_fn(net, NFPROTO_IPV4, ops, nf_nat_ipv4_ops,
ARRAY_SIZE(nf_nat_ipv4_ops));
if (ret)
nf_nat_ipv6_unregister_fn(net, ops);
nf_nat_unregister_fn(net, NFPROTO_IPV6, ops,
ARRAY_SIZE(nf_nat_ipv6_ops));
return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_inet_register_fn);


@ -14,6 +14,6 @@ config NETLABEL
Documentation/netlabel as well as the NetLabel SourceForge project
for configuration tools and additional documentation.
* http://netlabel.sf.net
* https://github.com/netlabel/netlabel_tools
If you are unsure, say N.


@ -208,6 +208,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
/* refcount initialized at 1 */
spin_unlock_bh(&nr_node_list_lock);
nr_neigh_put(nr_neigh);
return 0;
}
nr_node_lock(nr_node);


@ -1895,7 +1895,8 @@ static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
struct hlist_head *head = &info->limits[i];
struct ovs_ct_limit *ct_limit;
hlist_for_each_entry_rcu(ct_limit, head, hlist_node)
hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
lockdep_ovsl_is_held())
kfree_rcu(ct_limit, rcu);
}
kfree(ovs_net->ct_limit_info->limits);


@ -2466,8 +2466,10 @@ static void __net_exit ovs_exit_net(struct net *dnet)
struct net *net;
LIST_HEAD(head);
ovs_ct_exit(dnet);
ovs_lock();
ovs_ct_exit(dnet);
list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
__dp_destroy(dp);


@ -82,7 +82,7 @@ static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb)
if (q->skip_sock_check)
goto skip;
if (!sk)
if (!sk || !sk_fullsock(sk))
return false;
if (!sock_flag(sk, SOCK_TXTIME))
@ -137,8 +137,9 @@ static void report_sock_error(struct sk_buff *skb, u32 err, u8 code)
struct sock_exterr_skb *serr;
struct sk_buff *clone;
ktime_t txtime = skb->tstamp;
struct sock *sk = skb->sk;
if (!skb->sk || !(skb->sk->sk_txtime_report_errors))
if (!sk || !sk_fullsock(sk) || !(sk->sk_txtime_report_errors))
return;
clone = skb_clone(skb, GFP_ATOMIC);
@ -154,7 +155,7 @@ static void report_sock_error(struct sk_buff *skb, u32 err, u8 code)
serr->ee.ee_data = (txtime >> 32); /* high part of tstamp */
serr->ee.ee_info = txtime; /* low part of tstamp */
if (sock_queue_err_skb(skb->sk, clone))
if (sock_queue_err_skb(sk, clone))
kfree_skb(clone);
}


@ -858,7 +858,11 @@ struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
struct sctp_chunk *retval;
__u32 ctsn;
ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
if (chunk && chunk->asoc)
ctsn = sctp_tsnmap_get_ctsn(&chunk->asoc->peer.tsn_map);
else
ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
shut.cum_tsn_ack = htonl(ctsn);
retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0,


@ -1865,7 +1865,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
*/
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
SCTP_ST_CHUNK(0), NULL,
SCTP_ST_CHUNK(0), repl,
commands);
} else {
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
@ -5470,7 +5470,7 @@ enum sctp_disposition sctp_sf_do_9_2_start_shutdown(
* in the Cumulative TSN Ack field the last sequential TSN it
* has received from the peer.
*/
reply = sctp_make_shutdown(asoc, NULL);
reply = sctp_make_shutdown(asoc, arg);
if (!reply)
goto nomem;
@ -6068,7 +6068,7 @@ enum sctp_disposition sctp_sf_autoclose_timer_expire(
disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
arg, commands);
NULL, commands);
}
return disposition;


@ -1712,6 +1712,7 @@ exit:
case -EBUSY:
this_cpu_inc(stats->stat[STAT_ASYNC]);
*skb = NULL;
tipc_aead_put(aead);
return rc;
default:
this_cpu_inc(stats->stat[STAT_NOK]);


@ -2038,6 +2038,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
n = tipc_node_find_by_id(net, ehdr->id);
}
tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
tipc_node_put(n);
if (!skb)
return;
@ -2090,7 +2091,7 @@ rcv:
/* Check/update node state before receiving */
if (unlikely(skb)) {
if (unlikely(skb_linearize(skb)))
goto discard;
goto out_node_put;
tipc_node_write_lock(n);
if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
if (le->link) {
@ -2119,6 +2120,7 @@ rcv:
if (!skb_queue_empty(&xmitq))
tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
out_node_put:
tipc_node_put(n);
discard:
kfree_skb(skb);


@ -115,8 +115,10 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
goto drop;
}
if (!pskb_may_pull(skb, 1))
if (!pskb_may_pull(skb, 1)) {
x25_neigh_put(nb);
return 0;
}
switch (skb->data[0]) {


@ -479,6 +479,7 @@ static int do_unregister(int argc, char **argv)
static int do_register(int argc, char **argv)
{
struct bpf_object_load_attr load_attr = {};
const struct bpf_map_def *def;
struct bpf_map_info info = {};
__u32 info_len = sizeof(info);
@ -499,7 +500,12 @@ static int do_register(int argc, char **argv)
set_max_rlimit();
if (bpf_object__load(obj)) {
load_attr.obj = obj;
if (verifier_logs)
/* log_level1 + log_level2 + stats, but not stable UAPI */
load_attr.log_level = 1 + 2 + 4;
if (bpf_object__load_xattr(&load_attr)) {
bpf_object__close(obj);
return -1;
}
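
Editor's note: the hunk above routes the struct_ops load through bpf_object__load_xattr() so that -d can raise the verifier log level. A minimal standalone sketch of the same libbpf call pattern follows; the function name and the debug-flag plumbing are illustrative, and error handling is simplified.

#include <stdbool.h>
#include <bpf/libbpf.h>

/* Load a BPF object, optionally with full verifier logs, mirroring what
 * bpftool's struct_ops command does when -d is given. */
static int load_object(const char *path, bool debug)
{
	struct bpf_object_load_attr load_attr = {};
	struct bpf_object *obj = bpf_object__open(path);

	if (libbpf_get_error(obj))
		return -1;

	load_attr.obj = obj;
	if (debug)
		load_attr.log_level = 1 + 2 + 4;	/* log_level1 + log_level2 + stats */

	if (bpf_object__load_xattr(&load_attr)) {
		bpf_object__close(obj);
		return -1;
	}
	return 0;
}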

Some files were not shown because too many files have changed in this diff.