Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

 1) Make sure SKB control block is in the proper state during IPSEC
    ESP-in-TCP encapsulation. From Sabrina Dubroca.

 2) Various kinds of attributes were not being cloned properly when we
    build new xfrm_state objects from existing ones. Fix from Antony
    Antony.

 3) Make sure to keep BTF sections, from Tony Ambardar.

 4) TX DMA channels need proper locking in lantiq driver, from Hauke
    Mehrtens.

 5) Honour route MTU during forwarding, always. From Maciej
    Żenczykowski. (An illustrative MTU check is sketched after this
    list.)

 6) Fix races in kTLS which can result in crashes, from Rohit
    Maheshwari.

 7) Skip TCP DSACKs with ridiculous sequence ranges, from Priyaranjan
    Jha. (A sanity-check sketch follows the list.)

 8) Use correct address family in xfrm state lookups, from Herbert Xu.

 9) A bridge FDB flush should not clear out user-managed FDB entries
    with the ext_learn flag set, from Nikolay Aleksandrov. (See the
    sketch after this list.)

10) Fix nested locking of netdev address lists, from Taehee Yoo.

11) Fix handling of 32-bit DATA_FIN values in mptcp, from Mat Martineau.
    (A sequence-expansion sketch follows the list.)

12) Fix r8169 data corruptions on RTL8402 chips, from Heiner Kallweit.

13) Don't free command entries in mlx5 while comp handler could still be
    running, from Eran Ben Elisha.

14) Error flow of request_irq() in mlx5 is busted: due to an off-by-one
    we try to free an IRQ that was never allocated. From Maor Gottlieb.
    (The bug class is sketched after this list.)

15) Fix leak when dumping netlink policies, from Johannes Berg.

16) Sendpage cannot be performed when a page is a slab page or the page
    count is < 1. Some subsystems, such as nvme, were doing exactly
    that. Create a "sendpage_ok()" helper and use it as needed, from
    Coly Li. (The helper is shown after this list.)

17) Don't leak request socket when using syncookies with mptcp, from
    Paolo Abeni.
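
For item 5, a minimal sketch of the forwarding rule (helper name
hypothetical, not the exact patch): an oversized packet is measured
against the route MTU, while GSO packets are exempt because they are
segmented and re-checked later in the stack:

    static bool pkt_exceeds_route_mtu(const struct sk_buff *skb,
                                      unsigned int route_mtu)
    {
            if (skb->len <= route_mtu)
                    return false;

            /* GSO packets are segmented to the MTU further down */
            if (skb_is_gso(skb))
                    return false;

            return true;    /* caller sends ICMP "frag needed" */
    }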
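For item 7, a hedged illustration of the DSACK sanity check (names
hypothetical; the real test lives in the TCP SACK input path): a block
whose range is inverted, or wider than the largest receive window the
peer ever advertised, cannot be legitimate and must not feed the
reordering heuristics:

    static bool dsack_range_is_dubious(u32 start_seq, u32 end_seq,
                                       u32 max_window)
    {
            /* before() is the kernel's wrap-safe sequence compare */
            return !before(start_seq, end_seq) ||
                   end_seq - start_seq > max_window;
    }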
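For item 9, a sketch of the flush rule, assuming the bridge's FDB flag
bits (BR_FDB_STATIC, BR_FDB_ADDED_BY_EXT_LEARN, BR_FDB_ADDED_BY_USER);
the helper name is made up:

    static bool fdb_should_flush(const struct net_bridge_fdb_entry *f)
    {
            if (test_bit(BR_FDB_STATIC, &f->flags))
                    return false;

            /* user-managed external entries survive a flush */
            if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags) &&
                test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
                    return false;

            return true;
    }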
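For item 11, the underlying technique is classic serial-number
expansion: pick the 64-bit value closest to the last known one whose
low 32 bits match the received value. A hedged sketch (hypothetical
helper, epoch-zero underflow ignored):

    static u64 expand_seq32(u64 last64, u32 new32)
    {
            u64 seq = (last64 & GENMASK_ULL(63, 32)) | new32;

            if (seq < last64 && last64 - seq > BIT_ULL(31))
                    seq += BIT_ULL(32);     /* counter wrapped */
            else if (seq > last64 && seq - last64 > BIT_ULL(31))
                    seq -= BIT_ULL(32);     /* from previous epoch */

            return seq;
    }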
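For item 14, the bug class in the abstract (the table type is
hypothetical; request_irq()/free_irq() are the real APIs): when request
number i fails, the unwind must free entries 0..i-1 only:

    static int request_all_irqs(struct my_irq *table, int count)
    {
            int i, err;

            for (i = 0; i < count; i++) {
                    err = request_irq(table[i].irq, table[i].handler, 0,
                                      table[i].name, &table[i]);
                    if (err)
                            goto unwind;
            }
            return 0;

    unwind:
            while (--i >= 0)        /* entry i was never allocated */
                    free_irq(table[i].irq, &table[i]);
            return err;
    }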
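For item 16, the new helper in include/linux/net.h boils down, modulo
comments, to exactly the two checks named above:

    static inline bool sendpage_ok(struct page *page)
    {
            return !PageSlab(page) && page_count(page) >= 1;
    }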

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (111 commits)
  net/core: check length before updating Ethertype in skb_mpls_{push,pop}
  net: mvneta: fix double free of txq->buf
  net_sched: check error pointer in tcf_dump_walker()
  net: team: fix memory leak in __team_options_register
  net: typhoon: Fix a typo Typoon --> Typhoon
  net: hinic: fix DEVLINK build errors
  net: stmmac: Modify configuration method of EEE timers
  tcp: fix syn cookied MPTCP request socket leak
  libceph: use sendpage_ok() in ceph_tcp_sendpage()
  scsi: libiscsi: use sendpage_ok() in iscsi_tcp_segment_map()
  drbd: code cleanup by using sendpage_ok() to check page for kernel_sendpage()
  tcp: use sendpage_ok() to detect misused .sendpage
  nvme-tcp: check page by sendpage_ok() before calling kernel_sendpage()
  net: add WARN_ONCE in kernel_sendpage() for improper zero-copy send
  net: introduce helper sendpage_ok() in include/linux/net.h
  net: usb: pegasus: Proper error handing when setting pegasus' MAC address
  net: core: document two new elements of struct net_device
  netlink: fix policy dump leak
  net/mlx5e: Fix race condition on nhe->n pointer in neigh update
  net/mlx5e: Fix VLAN create flow
  ...
Committed by Linus Torvalds, 2020-10-05 11:27:14 -07:00 (commit 165563c050).
140 changed files with 1387 additions and 697 deletions.


@@ -21,6 +21,7 @@ Required properties:
       - "renesas,etheravb-r8a774a1" for the R8A774A1 SoC.
       - "renesas,etheravb-r8a774b1" for the R8A774B1 SoC.
       - "renesas,etheravb-r8a774c0" for the R8A774C0 SoC.
+      - "renesas,etheravb-r8a774e1" for the R8A774E1 SoC.
       - "renesas,etheravb-r8a7795" for the R8A7795 SoC.
       - "renesas,etheravb-r8a7796" for the R8A77960 SoC.
       - "renesas,etheravb-r8a77961" for the R8A77961 SoC.


@@ -8752,7 +8752,8 @@ F: include/drm/i915*
 F: include/uapi/drm/i915_drm.h

 INTEL ETHERNET DRIVERS
-M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+M: Jesse Brandeburg <jesse.brandeburg@intel.com>
+M: Tony Nguyen <anthony.l.nguyen@intel.com>
 L: intel-wired-lan@lists.osuosl.org (moderated for non-subscribers)
 S: Supported
 W: http://www.intel.com/support/feedback.htm
@@ -12077,6 +12078,7 @@ NETWORKING [DSA]
 M: Andrew Lunn <andrew@lunn.ch>
 M: Vivien Didelot <vivien.didelot@gmail.com>
 M: Florian Fainelli <f.fainelli@gmail.com>
+M: Vladimir Oltean <olteanv@gmail.com>
 S: Maintained
 F: Documentation/devicetree/bindings/net/dsa/
 F: drivers/net/dsa/
@@ -18282,7 +18284,8 @@ F: drivers/gpu/vga/vga_switcheroo.c
 F: include/linux/vga_switcheroo.h

 VIA RHINE NETWORK DRIVER
-S: Orphan
+S: Maintained
+M: Kevin Brace <kevinbrace@bracecomputerlab.com>
 F: drivers/net/ethernet/via/via-rhine.c

 VIA SD/MMC CARD CONTROLLER DRIVER


@@ -475,7 +475,6 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 	case BPF_JMP | BPF_JSET | BPF_K:
 	case BPF_JMP | BPF_JSET | BPF_X:
 		true_cond = COND_NE;
-		fallthrough;
 	cond_branch:
 		/* same targets, can avoid doing the test :) */
 		if (filter[i].jt == filter[i].jf) {


@@ -1553,7 +1553,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
 	 * put_page(); and would cause either a VM_BUG directly, or
 	 * __page_cache_release a page that would actually still be referenced
 	 * by someone, leading to some obscure delayed Oops somewhere else. */
-	if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page))
+	if (drbd_disable_sendpage || !sendpage_ok(page))
 		return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);

 	msg_flags |= MSG_NOSIGNAL;


@@ -1320,9 +1320,10 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
 }
 EXPORT_SYMBOL(rdma_read_gid_attr_ndev_rcu);

-static int get_lower_dev_vlan(struct net_device *lower_dev, void *data)
+static int get_lower_dev_vlan(struct net_device *lower_dev,
+			      struct netdev_nested_priv *priv)
 {
-	u16 *vlan_id = data;
+	u16 *vlan_id = (u16 *)priv->data;

 	if (is_vlan_dev(lower_dev))
 		*vlan_id = vlan_dev_vlan_id(lower_dev);
@@ -1348,6 +1349,9 @@ static int get_lower_dev_vlan(struct net_device *lower_dev, void *data)
 int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
 			    u16 *vlan_id, u8 *smac)
 {
+	struct netdev_nested_priv priv = {
+		.data = (void *)vlan_id,
+	};
 	struct net_device *ndev;

 	rcu_read_lock();
@@ -1368,7 +1372,7 @@ int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
 			 * the lower vlan device for this gid entry.
 			 */
 			netdev_walk_all_lower_dev_rcu(attr->ndev,
-						      get_lower_dev_vlan, vlan_id);
+						      get_lower_dev_vlan, &priv);
 		}
 	}
 	rcu_read_unlock();


@@ -2865,9 +2865,10 @@ struct iboe_prio_tc_map {
 	bool found;
 };

-static int get_lower_vlan_dev_tc(struct net_device *dev, void *data)
+static int get_lower_vlan_dev_tc(struct net_device *dev,
+				 struct netdev_nested_priv *priv)
 {
-	struct iboe_prio_tc_map *map = data;
+	struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data;

 	if (is_vlan_dev(dev))
 		map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
@@ -2886,16 +2887,18 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
 {
 	struct iboe_prio_tc_map prio_tc_map = {};
 	int prio = rt_tos2priority(tos);
+	struct netdev_nested_priv priv;

 	/* If VLAN device, get it directly from the VLAN netdev */
 	if (is_vlan_dev(ndev))
 		return get_vlan_ndev_tc(ndev, prio);

 	prio_tc_map.input_prio = prio;
+	priv.data = (void *)&prio_tc_map;
 	rcu_read_lock();
 	netdev_walk_all_lower_dev_rcu(ndev,
 				      get_lower_vlan_dev_tc,
-				      &prio_tc_map);
+				      &priv);
 	rcu_read_unlock();

 	/* If map is found from lower device, use it; Otherwise
 	 * continue with the current netdevice to get priority to tc map.


@@ -531,10 +531,11 @@ struct upper_list {
 	struct net_device *upper;
 };

-static int netdev_upper_walk(struct net_device *upper, void *data)
+static int netdev_upper_walk(struct net_device *upper,
+			     struct netdev_nested_priv *priv)
 {
 	struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
-	struct list_head *upper_list = data;
+	struct list_head *upper_list = (struct list_head *)priv->data;

 	if (!entry)
 		return 0;
@@ -553,12 +554,14 @@ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
 						      struct net_device *ndev))
 {
 	struct net_device *ndev = cookie;
+	struct netdev_nested_priv priv;
 	struct upper_list *upper_iter;
 	struct upper_list *upper_temp;
 	LIST_HEAD(upper_list);

+	priv.data = &upper_list;
 	rcu_read_lock();
-	netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &upper_list);
+	netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &priv);
 	rcu_read_unlock();

 	handle_netdev(ib_dev, port, ndev);


@@ -342,9 +342,10 @@ struct ipoib_walk_data {
 	struct net_device *result;
 };

-static int ipoib_upper_walk(struct net_device *upper, void *_data)
+static int ipoib_upper_walk(struct net_device *upper,
+			    struct netdev_nested_priv *priv)
 {
-	struct ipoib_walk_data *data = _data;
+	struct ipoib_walk_data *data = (struct ipoib_walk_data *)priv->data;
 	int ret = 0;

 	if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
@@ -368,10 +369,12 @@ static int ipoib_upper_walk(struct net_device *upper, void *_data)
 static struct net_device *ipoib_get_net_dev_match_addr(
 		const struct sockaddr *addr, struct net_device *dev)
 {
+	struct netdev_nested_priv priv;
 	struct ipoib_walk_data data = {
 		.addr = addr,
 	};

+	priv.data = (void *)&data;
 	rcu_read_lock();
 	if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
 		dev_hold(dev);
@@ -379,7 +382,7 @@ static struct net_device *ipoib_get_net_dev_match_addr(
 		goto out;
 	}

-	netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &data);
+	netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &priv);
 out:
 	rcu_read_unlock();
 	return data.result;


@@ -942,9 +942,10 @@ struct alb_walk_data {
 	bool strict_match;
 };

-static int alb_upper_dev_walk(struct net_device *upper, void *_data)
+static int alb_upper_dev_walk(struct net_device *upper,
+			      struct netdev_nested_priv *priv)
 {
-	struct alb_walk_data *data = _data;
+	struct alb_walk_data *data = (struct alb_walk_data *)priv->data;
 	bool strict_match = data->strict_match;
 	struct bonding *bond = data->bond;
 	struct slave *slave = data->slave;
@@ -983,6 +984,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
 				      bool strict_match)
 {
 	struct bonding *bond = bond_get_bond_by_slave(slave);
+	struct netdev_nested_priv priv;
 	struct alb_walk_data data = {
 		.strict_match = strict_match,
 		.mac_addr = mac_addr,
@@ -990,6 +992,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
 		.bond = bond,
 	};

+	priv.data = (void *)&data;
 	/* send untagged */
 	alb_send_lp_vid(slave, mac_addr, 0, 0);
@@ -997,7 +1000,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
 	 * for that device.
 	 */
 	rcu_read_lock();
-	netdev_walk_all_upper_dev_rcu(bond->dev, alb_upper_dev_walk, &data);
+	netdev_walk_all_upper_dev_rcu(bond->dev, alb_upper_dev_walk, &priv);
 	rcu_read_unlock();
 }


@@ -1315,6 +1315,7 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
 	bond_dev->type = slave_dev->type;
 	bond_dev->hard_header_len = slave_dev->hard_header_len;
+	bond_dev->needed_headroom = slave_dev->needed_headroom;
 	bond_dev->addr_len = slave_dev->addr_len;

 	memcpy(bond_dev->broadcast, slave_dev->broadcast,
@@ -2510,22 +2511,26 @@ re_arm:
 	}
 }

-static int bond_upper_dev_walk(struct net_device *upper, void *data)
+static int bond_upper_dev_walk(struct net_device *upper,
+			       struct netdev_nested_priv *priv)
 {
-	__be32 ip = *((__be32 *)data);
+	__be32 ip = *(__be32 *)priv->data;

 	return ip == bond_confirm_addr(upper, 0, ip);
 }

 static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
 {
+	struct netdev_nested_priv priv = {
+		.data = (void *)&ip,
+	};
 	bool ret = false;

 	if (ip == bond_confirm_addr(bond->dev, 0, ip))
 		return true;

 	rcu_read_lock();
-	if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &ip))
+	if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &priv))
 		ret = true;
 	rcu_read_unlock();


@@ -387,8 +387,8 @@ EXPORT_SYMBOL(ksz_switch_alloc);
 int ksz_switch_register(struct ksz_device *dev,
 			const struct ksz_dev_ops *ops)
 {
+	struct device_node *port, *ports;
 	phy_interface_t interface;
-	struct device_node *port;
 	unsigned int port_num;
 	int ret;
@@ -429,13 +429,17 @@ int ksz_switch_register(struct ksz_device *dev,
 		ret = of_get_phy_mode(dev->dev->of_node, &interface);
 		if (ret == 0)
 			dev->compat_interface = interface;
-		for_each_available_child_of_node(dev->dev->of_node, port) {
-			if (of_property_read_u32(port, "reg", &port_num))
-				continue;
-			if (port_num >= dev->port_cnt)
-				return -EINVAL;
-			of_get_phy_mode(port, &dev->ports[port_num].interface);
-		}
+		ports = of_get_child_by_name(dev->dev->of_node, "ports");
+		if (ports)
+			for_each_available_child_of_node(ports, port) {
+				if (of_property_read_u32(port, "reg",
+							 &port_num))
+					continue;
+				if (port_num >= dev->port_cnt)
+					return -EINVAL;
+				of_get_phy_mode(port,
+						&dev->ports[port_num].interface);
+			}
 		dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
 							 "microchip,synclko-125");
 	}


@@ -685,12 +685,12 @@ static struct vcap_field vsc9959_vcap_is2_actions[] = {
 	[VCAP_IS2_ACT_POLICE_ENA] = { 9, 1},
 	[VCAP_IS2_ACT_POLICE_IDX] = { 10, 9},
 	[VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1},
-	[VCAP_IS2_ACT_PORT_MASK] = { 20, 11},
-	[VCAP_IS2_ACT_REW_OP] = { 31, 9},
-	[VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1},
-	[VCAP_IS2_ACT_RSV] = { 41, 2},
-	[VCAP_IS2_ACT_ACL_ID] = { 43, 6},
-	[VCAP_IS2_ACT_HIT_CNT] = { 49, 32},
+	[VCAP_IS2_ACT_PORT_MASK] = { 20, 6},
+	[VCAP_IS2_ACT_REW_OP] = { 26, 9},
+	[VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 35, 1},
+	[VCAP_IS2_ACT_RSV] = { 36, 2},
+	[VCAP_IS2_ACT_ACL_ID] = { 38, 6},
+	[VCAP_IS2_ACT_HIT_CNT] = { 44, 32},
 };

 static const struct vcap_props vsc9959_vcap_props[] = {
@@ -1284,8 +1284,28 @@ void vsc9959_mdio_bus_free(struct ocelot *ocelot)
 static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
 				    u32 speed)
 {
+	u8 tas_speed;
+
+	switch (speed) {
+	case SPEED_10:
+		tas_speed = OCELOT_SPEED_10;
+		break;
+	case SPEED_100:
+		tas_speed = OCELOT_SPEED_100;
+		break;
+	case SPEED_1000:
+		tas_speed = OCELOT_SPEED_1000;
+		break;
+	case SPEED_2500:
+		tas_speed = OCELOT_SPEED_2500;
+		break;
+	default:
+		tas_speed = OCELOT_SPEED_1000;
+		break;
+	}
+
 	ocelot_rmw_rix(ocelot,
-		       QSYS_TAG_CONFIG_LINK_SPEED(speed),
+		       QSYS_TAG_CONFIG_LINK_SPEED(tas_speed),
 		       QSYS_TAG_CONFIG_LINK_SPEED_M,
 		       QSYS_TAG_CONFIG, port);
 }


@@ -706,7 +706,7 @@ static const struct vcap_props vsc9953_vcap_props[] = {
 		.action_type_width = 1,
 		.action_table = {
 			[IS2_ACTION_TYPE_NORMAL] = {
-				.width = 44,
+				.width = 50, /* HIT_CNT not included */
 				.count = 2
 			},
 			[IS2_ACTION_TYPE_SMAC_SIP] = {


@@ -33,7 +33,7 @@ struct basic_ring {
 	u32 lastWrite;
 };

-/* The Typoon transmit ring -- same as a basic ring, plus:
+/* The Typhoon transmit ring -- same as a basic ring, plus:
  * lastRead: where we're at in regard to cleaning up the ring
  * writeRegister: register to use for writing (different for Hi & Lo rings)
  */


@@ -8,7 +8,7 @@
 obj-$(CONFIG_AQTION) += atlantic.o

-ccflags-y += -I$(src)
+ccflags-y += -I$(srctree)/$(src)

 atlantic-objs := aq_main.o \
 	aq_nic.o \
@@ -33,4 +33,4 @@ atlantic-objs := aq_main.o \
 atlantic-$(CONFIG_MACSEC) += aq_macsec.o

 atlantic-$(CONFIG_PTP_1588_CLOCK) += aq_ptp.o


@@ -284,12 +284,12 @@
 #define CCM_REG_GR_ARB_TYPE 0xd015c
 /* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
    highest priority is 3. It is supposed; that the Store channel priority is
-   the compliment to 4 of the rest priorities - Aggregation channel; Load
+   the complement to 4 of the rest priorities - Aggregation channel; Load
    (FIC0) channel and Load (FIC1). */
 #define CCM_REG_GR_LD0_PR 0xd0164
 /* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
    highest priority is 3. It is supposed; that the Store channel priority is
-   the compliment to 4 of the rest priorities - Aggregation channel; Load
+   the complement to 4 of the rest priorities - Aggregation channel; Load
    (FIC0) channel and Load (FIC1). */
 #define CCM_REG_GR_LD1_PR 0xd0168
 /* [RW 2] General flags index. */
@@ -4489,11 +4489,11 @@
 #define TCM_REG_GR_ARB_TYPE 0x50114
 /* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
    highest priority is 3. It is supposed that the Store channel is the
-   compliment of the other 3 groups. */
+   complement of the other 3 groups. */
 #define TCM_REG_GR_LD0_PR 0x5011c
 /* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
    highest priority is 3. It is supposed that the Store channel is the
-   compliment of the other 3 groups. */
+   complement of the other 3 groups. */
 #define TCM_REG_GR_LD1_PR 0x50120
 /* [RW 4] The number of double REG-pairs; loaded from the STORM context and
    sent to STORM; for a specific connection type. The double REG-pairs are
@@ -5020,11 +5020,11 @@
 #define UCM_REG_GR_ARB_TYPE 0xe0144
 /* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
    highest priority is 3. It is supposed that the Store channel group is
-   compliment to the others. */
+   complement to the others. */
 #define UCM_REG_GR_LD0_PR 0xe014c
 /* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
    highest priority is 3. It is supposed that the Store channel group is
-   compliment to the others. */
+   complement to the others. */
 #define UCM_REG_GR_LD1_PR 0xe0150
 /* [RW 2] The queue index for invalidate counter flag decision. */
 #define UCM_REG_INV_CFLG_Q 0xe00e4
@@ -5523,11 +5523,11 @@
 #define XCM_REG_GR_ARB_TYPE 0x2020c
 /* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
    highest priority is 3. It is supposed that the Channel group is the
-   compliment of the other 3 groups. */
+   complement of the other 3 groups. */
 #define XCM_REG_GR_LD0_PR 0x20214
 /* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
    highest priority is 3. It is supposed that the Channel group is the
-   compliment of the other 3 groups. */
+   complement of the other 3 groups. */
 #define XCM_REG_GR_LD1_PR 0x20218
 /* [RW 1] Input nig0 Interface enable. If 0 - the valid input is
    disregarded; acknowledge output is deasserted; all other signals are


@@ -1219,7 +1219,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
 	 */
 	if (netdev->phydev) {
 		netif_carrier_off(netdev);
-		phy_start_aneg(netdev->phydev);
+		phy_start(netdev->phydev);
 	}

 	netif_wake_queue(netdev);
@@ -1247,8 +1247,10 @@ static int octeon_mgmt_stop(struct net_device *netdev)
 	napi_disable(&p->napi);
 	netif_stop_queue(netdev);

-	if (netdev->phydev)
+	if (netdev->phydev) {
+		phy_stop(netdev->phydev);
 		phy_disconnect(netdev->phydev);
+	}

 	netif_carrier_off(netdev);


@@ -11,9 +11,11 @@
 #define DPNI_VER_MAJOR 7
 #define DPNI_VER_MINOR 0
 #define DPNI_CMD_BASE_VERSION 1
+#define DPNI_CMD_2ND_VERSION 2
 #define DPNI_CMD_ID_OFFSET 4

 #define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
+#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)

 #define DPNI_CMDID_OPEN DPNI_CMD(0x801)
 #define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
@@ -45,7 +47,7 @@
 #define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
 #define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
 #define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
-#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B)
+#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B)

 #define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
 #define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)


@@ -229,7 +229,7 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
 	/* Return all Fs if nothing was there */
 	if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
 	    !priv->has_a011043) {
-		dev_err(&bus->dev,
+		dev_dbg(&bus->dev,
 			"Error while reading PHY%d reg at %d.%hhu\n",
 			phy_id, dev_addr, regnum);
 		return 0xffff;


@@ -6,6 +6,7 @@
 config HINIC
 	tristate "Huawei Intelligent PCIE Network Interface Card"
 	depends on (PCI_MSI && (X86 || ARM64))
+	select NET_DEVLINK
 	help
 	  This driver supports HiNIC PCIE Ethernet cards.
 	  To compile this driver as part of the kernel, choose Y here.


@@ -58,9 +58,9 @@ static int change_mac(struct hinic_dev *nic_dev, const u8 *addr,
 				 sizeof(port_mac_cmd),
 				 &port_mac_cmd, &out_size);
 	if (err || out_size != sizeof(port_mac_cmd) ||
 	    (port_mac_cmd.status &&
-	     port_mac_cmd.status != HINIC_PF_SET_VF_ALREADY &&
+	     (port_mac_cmd.status != HINIC_PF_SET_VF_ALREADY || !HINIC_IS_VF(hwif)) &&
 	     port_mac_cmd.status != HINIC_MGMT_STATUS_EXIST)) {
 		dev_err(&pdev->dev, "Failed to change MAC, err: %d, status: 0x%x, out size: 0x%x\n",
 			err, port_mac_cmd.status, out_size);
 		return -EFAULT;


@@ -38,8 +38,7 @@ static int hinic_set_mac(struct hinic_hwdev *hwdev, const u8 *mac_addr,
 	err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_MAC, &mac_info,
 				 sizeof(mac_info), &mac_info, &out_size);
 	if (err || out_size != sizeof(mac_info) ||
-	    (mac_info.status && mac_info.status != HINIC_PF_SET_VF_ALREADY &&
-	     mac_info.status != HINIC_MGMT_STATUS_EXIST)) {
+	    (mac_info.status && mac_info.status != HINIC_MGMT_STATUS_EXIST)) {
 		dev_err(&hwdev->func_to_io.hwif->pdev->dev, "Failed to set MAC, err: %d, status: 0x%x, out size: 0x%x\n",
 			err, mac_info.status, out_size);
 		return -EIO;
@@ -503,8 +502,7 @@ struct hinic_sriov_info *hinic_get_sriov_info_by_pcidev(struct pci_dev *pdev)
 static int hinic_check_mac_info(u8 status, u16 vlan_id)
 {
-	if ((status && status != HINIC_MGMT_STATUS_EXIST &&
-	     status != HINIC_PF_SET_VF_ALREADY) ||
+	if ((status && status != HINIC_MGMT_STATUS_EXIST) ||
 	    (vlan_id & CHECK_IPSU_15BIT &&
 	     status == HINIC_MGMT_STATUS_EXIST))
 		return -EINVAL;
@@ -546,12 +544,6 @@ static int hinic_update_mac(struct hinic_hwdev *hwdev, u8 *old_mac,
 		return -EINVAL;
 	}

-	if (mac_info.status == HINIC_PF_SET_VF_ALREADY) {
-		dev_warn(&hwdev->hwif->pdev->dev,
-			 "PF has already set VF MAC. Ignore update operation\n");
-		return HINIC_PF_SET_VF_ALREADY;
-	}
-
 	if (mac_info.status == HINIC_MGMT_STATUS_EXIST)
 		dev_warn(&hwdev->hwif->pdev->dev, "MAC is repeated. Ignore update operation\n");


@@ -3806,8 +3806,8 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
 static int __maybe_unused iavf_resume(struct device *dev_d)
 {
 	struct pci_dev *pdev = to_pci_dev(dev_d);
-	struct iavf_adapter *adapter = pci_get_drvdata(pdev);
-	struct net_device *netdev = adapter->netdev;
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct iavf_adapter *adapter = netdev_priv(netdev);
 	u32 err;

 	pci_set_master(pdev);


@@ -2288,26 +2288,28 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
 {
 	struct ice_hw_func_caps *func_caps = &hw->func_caps;
 	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
-	u32 valid_func, rxq_first_id, txq_first_id;
-	u32 msix_vector_first_id, max_mtu;
+	struct ice_hw_common_caps cached_caps;
 	u32 num_funcs;

 	/* cache some func_caps values that should be restored after memset */
-	valid_func = func_caps->common_cap.valid_functions;
-	txq_first_id = func_caps->common_cap.txq_first_id;
-	rxq_first_id = func_caps->common_cap.rxq_first_id;
-	msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
-	max_mtu = func_caps->common_cap.max_mtu;
+	cached_caps = func_caps->common_cap;

 	/* unset func capabilities */
 	memset(func_caps, 0, sizeof(*func_caps));

+#define ICE_RESTORE_FUNC_CAP(name) \
+	func_caps->common_cap.name = cached_caps.name
+
 	/* restore cached values */
-	func_caps->common_cap.valid_functions = valid_func;
-	func_caps->common_cap.txq_first_id = txq_first_id;
-	func_caps->common_cap.rxq_first_id = rxq_first_id;
-	func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
-	func_caps->common_cap.max_mtu = max_mtu;
+	ICE_RESTORE_FUNC_CAP(valid_functions);
+	ICE_RESTORE_FUNC_CAP(txq_first_id);
+	ICE_RESTORE_FUNC_CAP(rxq_first_id);
+	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
+	ICE_RESTORE_FUNC_CAP(max_mtu);
+	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
+	ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
+	ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
+	ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);

 	/* one Tx and one Rx queue in safe mode */
 	func_caps->common_cap.num_rxq = 1;
@@ -2318,22 +2320,25 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
 	func_caps->guar_num_vsi = 1;

 	/* cache some dev_caps values that should be restored after memset */
-	valid_func = dev_caps->common_cap.valid_functions;
-	txq_first_id = dev_caps->common_cap.txq_first_id;
-	rxq_first_id = dev_caps->common_cap.rxq_first_id;
-	msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
-	max_mtu = dev_caps->common_cap.max_mtu;
+	cached_caps = dev_caps->common_cap;
 	num_funcs = dev_caps->num_funcs;

 	/* unset dev capabilities */
 	memset(dev_caps, 0, sizeof(*dev_caps));

+#define ICE_RESTORE_DEV_CAP(name) \
+	dev_caps->common_cap.name = cached_caps.name
+
 	/* restore cached values */
-	dev_caps->common_cap.valid_functions = valid_func;
-	dev_caps->common_cap.txq_first_id = txq_first_id;
-	dev_caps->common_cap.rxq_first_id = rxq_first_id;
-	dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
-	dev_caps->common_cap.max_mtu = max_mtu;
+	ICE_RESTORE_DEV_CAP(valid_functions);
+	ICE_RESTORE_DEV_CAP(txq_first_id);
+	ICE_RESTORE_DEV_CAP(rxq_first_id);
+	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
+	ICE_RESTORE_DEV_CAP(max_mtu);
+	ICE_RESTORE_DEV_CAP(nvm_unified_update);
+	ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
+	ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
+	ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
 	dev_caps->num_funcs = num_funcs;

 	/* one Tx and one Rx queue per function in safe mode */


@@ -289,7 +289,13 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
 		return -EIO;
 	}

-	err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, HZ, &event);
+	/* In most cases, firmware reports a write completion within a few
+	 * milliseconds. However, it has been observed that a completion might
+	 * take more than a second to complete in some cases. The timeout here
+	 * is conservative and is intended to prevent failure to update when
+	 * firmware is slow to respond.
+	 */
+	err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, 15 * HZ, &event);
 	if (err) {
 		dev_err(dev, "Timed out waiting for firmware write completion for module 0x%02x, err %d\n",
 			module, err);
@@ -513,7 +519,7 @@ static int ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
 		return -EIO;
 	}

-	err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, HZ,
+	err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, 30 * HZ,
 				    &event);
 	if (err) {
 		dev_err(dev, "Timed out waiting for firmware to switch active flash banks, err %d\n",


@@ -246,7 +246,7 @@ static int ice_get_free_slot(void *array, int size, int curr)
  * ice_vsi_delete - delete a VSI from the switch
  * @vsi: pointer to VSI being removed
  */
-void ice_vsi_delete(struct ice_vsi *vsi)
+static void ice_vsi_delete(struct ice_vsi *vsi)
 {
 	struct ice_pf *pf = vsi->back;
 	struct ice_vsi_ctx *ctxt;
@@ -313,7 +313,7 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
  *
  * Returns 0 on success, negative on failure
  */
-int ice_vsi_clear(struct ice_vsi *vsi)
+static int ice_vsi_clear(struct ice_vsi *vsi)
 {
 	struct ice_pf *pf = NULL;
 	struct device *dev;
@@ -563,7 +563,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
  * ice_vsi_put_qs - Release queues from VSI to PF
  * @vsi: the VSI that is going to release queues
  */
-void ice_vsi_put_qs(struct ice_vsi *vsi)
+static void ice_vsi_put_qs(struct ice_vsi *vsi)
 {
 	struct ice_pf *pf = vsi->back;
 	int i;
@@ -1196,6 +1196,18 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
 {
 	int i;

+	/* Avoid stale references by clearing map from vector to ring */
+	if (vsi->q_vectors) {
+		ice_for_each_q_vector(vsi, i) {
+			struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+			if (q_vector) {
+				q_vector->tx.ring = NULL;
+				q_vector->rx.ring = NULL;
+			}
+		}
+	}
+
 	if (vsi->tx_rings) {
 		for (i = 0; i < vsi->alloc_txq; i++) {
 			if (vsi->tx_rings[i]) {
@@ -2291,7 +2303,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 	if (status) {
 		dev_err(dev, "VSI %d failed lan queue config, error %s\n",
 			vsi->vsi_num, ice_stat_str(status));
-		goto unroll_vector_base;
+		goto unroll_clear_rings;
 	}

 	/* Add switch rule to drop all Tx Flow Control Frames, of look up


@@ -45,10 +45,6 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);

 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);

-void ice_vsi_delete(struct ice_vsi *vsi);
-
-int ice_vsi_clear(struct ice_vsi *vsi);
-
 #ifdef CONFIG_DCB
 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
 #endif /* CONFIG_DCB */
@@ -79,8 +75,6 @@ bool ice_is_reset_in_progress(unsigned long *state);
 void
 ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio);

-void ice_vsi_put_qs(struct ice_vsi *vsi);
-
 void ice_vsi_dis_irq(struct ice_vsi *vsi);

 void ice_vsi_free_irq(struct ice_vsi *vsi);


@@ -3169,10 +3169,8 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
 		return -EBUSY;

 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
-	if (!vsi) {
-		status = -ENOMEM;
-		goto unroll_vsi_setup;
-	}
+	if (!vsi)
+		return -ENOMEM;

 	status = ice_cfg_netdev(vsi);
 	if (status) {
@@ -3219,12 +3217,7 @@ unroll_napi_add:
 	}

 unroll_vsi_setup:
-	if (vsi) {
-		ice_vsi_free_q_vectors(vsi);
-		ice_vsi_delete(vsi);
-		ice_vsi_put_qs(vsi);
-		ice_vsi_clear(vsi);
-	}
+	ice_vsi_release(vsi);
 	return status;
 }
@@ -4522,6 +4515,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
 	}
 	ice_clear_interrupt_scheme(pf);

+	pci_save_state(pdev);
 	pci_wake_from_d3(pdev, pf->wol_ena);
 	pci_set_power_state(pdev, PCI_D3hot);
 	return 0;


@@ -5396,9 +5396,10 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
 	return err;
 }

-static int ixgbe_macvlan_up(struct net_device *vdev, void *data)
+static int ixgbe_macvlan_up(struct net_device *vdev,
+			    struct netdev_nested_priv *priv)
 {
-	struct ixgbe_adapter *adapter = data;
+	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
 	struct ixgbe_fwd_adapter *accel;

 	if (!netif_is_macvlan(vdev))
@@ -5415,8 +5416,12 @@ static int ixgbe_macvlan_up(struct net_device *vdev, void *data)

 static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
 {
+	struct netdev_nested_priv priv = {
+		.data = (void *)adapter,
+	};
+
 	netdev_walk_all_upper_dev_rcu(adapter->netdev,
-				      ixgbe_macvlan_up, adapter);
+				      ixgbe_macvlan_up, &priv);
 }

 static void ixgbe_configure(struct ixgbe_adapter *adapter)
@@ -9023,9 +9028,10 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
 }
 #endif /* CONFIG_IXGBE_DCB */

-static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
+static int ixgbe_reassign_macvlan_pool(struct net_device *vdev,
+				       struct netdev_nested_priv *priv)
 {
-	struct ixgbe_adapter *adapter = data;
+	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
 	struct ixgbe_fwd_adapter *accel;
 	int pool;
@@ -9062,13 +9068,16 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
 static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct netdev_nested_priv priv = {
+		.data = (void *)adapter,
+	};

 	/* flush any stale bits out of the fwd bitmask */
 	bitmap_clear(adapter->fwd_bitmask, 1, 63);

 	/* walk through upper devices reassigning pools */
 	netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
-				      adapter);
+				      &priv);
 }

 /**
@@ -9242,14 +9251,18 @@ struct upper_walk_data {
 	u8 queue;
 };

-static int get_macvlan_queue(struct net_device *upper, void *_data)
+static int get_macvlan_queue(struct net_device *upper,
+			     struct netdev_nested_priv *priv)
 {
 	if (netif_is_macvlan(upper)) {
 		struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
-		struct upper_walk_data *data = _data;
-		struct ixgbe_adapter *adapter = data->adapter;
-		int ifindex = data->ifindex;
+		struct ixgbe_adapter *adapter;
+		struct upper_walk_data *data;
+		int ifindex;

+		data = (struct upper_walk_data *)priv->data;
+		ifindex = data->ifindex;
+		adapter = data->adapter;
 		if (vadapter && upper->ifindex == ifindex) {
 			data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
 			data->action = data->queue;
@@ -9265,6 +9278,7 @@ static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
 {
 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
 	unsigned int num_vfs = adapter->num_vfs, vf;
+	struct netdev_nested_priv priv;
 	struct upper_walk_data data;
 	struct net_device *upper;
@@ -9284,8 +9298,9 @@
 	data.ifindex = ifindex;
 	data.action = 0;
 	data.queue = 0;
+	priv.data = (void *)&data;
 	if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
-					  get_macvlan_queue, &data)) {
+					  get_macvlan_queue, &priv)) {
 		*action = data.action;
 		*queue = data.queue;


@@ -245,6 +245,7 @@ static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
 	int pkts = 0;
 	int bytes = 0;

+	netif_tx_lock(net_dev);
 	while (pkts < budget) {
 		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];
@@ -268,6 +269,7 @@ static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
 	net_dev->stats.tx_bytes += bytes;
 	netdev_completed_queue(ch->priv->net_dev, pkts, bytes);

+	netif_tx_unlock(net_dev);
 	if (netif_queue_stopped(net_dev))
 		netif_wake_queue(net_dev);


@@ -3400,24 +3400,15 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
 	txq->last_desc = txq->size - 1;

 	txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
-	if (!txq->buf) {
-		dma_free_coherent(pp->dev->dev.parent,
-				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
-				  txq->descs, txq->descs_phys);
+	if (!txq->buf)
 		return -ENOMEM;
-	}

 	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
 	txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
 					   txq->size * TSO_HEADER_SIZE,
 					   &txq->tso_hdrs_phys, GFP_KERNEL);
-	if (!txq->tso_hdrs) {
-		kfree(txq->buf);
-		dma_free_coherent(pp->dev->dev.parent,
-				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
-				  txq->descs, txq->descs_phys);
+	if (!txq->tso_hdrs)
 		return -ENOMEM;
-	}

 	/* Setup XPS mapping */
 	if (txq_number > 1)


@@ -17,7 +17,7 @@
 static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);

-void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
+void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
 {
 	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
@@ -26,13 +26,21 @@ void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
 	tx_hdr = hw_mbase + mbox->tx_start;
 	rx_hdr = hw_mbase + mbox->rx_start;

-	spin_lock(&mdev->mbox_lock);
 	mdev->msg_size = 0;
 	mdev->rsp_size = 0;
 	tx_hdr->num_msgs = 0;
 	tx_hdr->msg_size = 0;
 	rx_hdr->num_msgs = 0;
 	rx_hdr->msg_size = 0;
+}
+EXPORT_SYMBOL(__otx2_mbox_reset);
+
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
+{
+	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+
+	spin_lock(&mdev->mbox_lock);
+	__otx2_mbox_reset(mbox, devid);
 	spin_unlock(&mdev->mbox_lock);
 }
 EXPORT_SYMBOL(otx2_mbox_reset);


@@ -93,6 +93,7 @@ struct mbox_msghdr {
 };

 void otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
+void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
 void otx2_mbox_destroy(struct otx2_mbox *mbox);
 int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
 		   struct pci_dev *pdev, void __force *reg_base,


@@ -463,6 +463,7 @@ void rvu_nix_freemem(struct rvu *rvu);
 int rvu_get_nixlf_count(struct rvu *rvu);
 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr);
+int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);

 /* NPC APIs */
 int rvu_npc_init(struct rvu *rvu);
@@ -477,7 +478,7 @@ void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
 void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
 void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
 				       int nixlf, u64 chan);
-void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc);
+void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable);
 int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
 void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
 void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);


@@ -17,7 +17,6 @@
 #include "npc.h"
 #include "cgx.h"

-static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
 			    int type, int chan_id);
@@ -2020,7 +2019,7 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
 	return 0;
 }

-static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
+int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
 {
 	int err = 0, idx, next_idx, last_idx;
 	struct nix_mce_list *mce_list;
@@ -2065,7 +2064,7 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
 	/* Disable MCAM entry in NPC */
 	if (!mce_list->count) {
-		rvu_npc_disable_bcast_entry(rvu, pcifunc);
+		rvu_npc_enable_bcast_entry(rvu, pcifunc, false);
 		goto end;
 	}


@@ -530,7 +530,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
 			      NIX_INTF_RX, &entry, true);
 }

-void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc)
+void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
 {
 	struct npc_mcam *mcam = &rvu->hw->mcam;
 	int blkaddr, index;
@@ -543,7 +543,7 @@ void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc)
 	pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
 	index = npc_get_nixlf_mcam_index(mcam, pcifunc, 0, NIXLF_BCAST_ENTRY);
-	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
 }

 void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
@@ -622,23 +622,35 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
 					 nixlf, NIXLF_UCAST_ENTRY);
 	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);

-	/* For PF, ena/dis promisc and bcast MCAM match entries */
-	if (pcifunc & RVU_PFVF_FUNC_MASK)
+	/* For PF, ena/dis promisc and bcast MCAM match entries.
+	 * For VFs add/delete from bcast list when RX multicast
+	 * feature is present.
+	 */
+	if (pcifunc & RVU_PFVF_FUNC_MASK && !rvu->hw->cap.nix_rx_multicast)
 		return;

 	/* For bcast, enable/disable only if it's action is not
 	 * packet replication, incase if action is replication
-	 * then this PF's nixlf is removed from bcast replication
+	 * then this PF/VF's nixlf is removed from bcast replication
 	 * list.
 	 */
-	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+	index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
 					 nixlf, NIXLF_BCAST_ENTRY);
 	bank = npc_get_bank(mcam, index);
 	*(u64 *)&action = rvu_read64(rvu, blkaddr,
 	     NPC_AF_MCAMEX_BANKX_ACTION(index & (mcam->banksize - 1), bank));
-	if (action.op != NIX_RX_ACTIONOP_MCAST)
+
+	/* VFs will not have BCAST entry */
+	if (action.op != NIX_RX_ACTIONOP_MCAST &&
+	    !(pcifunc & RVU_PFVF_FUNC_MASK)) {
 		npc_enable_mcam_entry(rvu, mcam,
 				      blkaddr, index, enable);
+	} else {
+		nix_update_bcast_mce_list(rvu, pcifunc, enable);
+		/* Enable PF's BCAST entry for packet replication */
+		rvu_npc_enable_bcast_entry(rvu, pcifunc, enable);
+	}

 	if (enable)
 		rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf);
 	else


@@ -370,8 +370,8 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
 		dst_mbox = &pf->mbox;
 		dst_size = dst_mbox->mbox.tx_size -
 			   ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
-		/* Check if msgs fit into destination area */
-		if (mbox_hdr->msg_size > dst_size)
+		/* Check if msgs fit into destination area and has valid size */
+		if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size)
 			return -EINVAL;

 		dst_mdev = &dst_mbox->mbox.dev[0];
@@ -526,10 +526,10 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
 end:
 		offset = mbox->rx_start + msg->next_msgoff;
+		if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
+			__otx2_mbox_reset(mbox, 0);
 		mdev->msgs_acked++;
 	}
-
-	otx2_mbox_reset(mbox, vf_idx);
 }

 static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
@@ -803,10 +803,11 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work)
 		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
 		otx2_process_pfaf_mbox_msg(pf, msg);
 		offset = mbox->rx_start + msg->next_msgoff;
+		if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
+			__otx2_mbox_reset(mbox, 0);
 		mdev->msgs_acked++;
 	}
-
-	otx2_mbox_reset(mbox, 0);
 }

 static void otx2_handle_link_event(struct otx2_nic *pf)
@@ -1560,10 +1561,13 @@ int otx2_open(struct net_device *netdev)
 	err = otx2_rxtx_enable(pf, true);
 	if (err)
-		goto err_free_cints;
+		goto err_tx_stop_queues;

 	return 0;

+err_tx_stop_queues:
+	netif_tx_stop_all_queues(netdev);
+	netif_carrier_off(netdev);
 err_free_cints:
 	otx2_free_cints(pf, qidx);
 	vec = pci_irq_vector(pf->pdev,


@@ -524,6 +524,7 @@ static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
 			sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
 			proto = ipv6_hdr(skb)->nexthdr;
+			sqe_hdr->ol3type = NIX_SENDL3TYPE_IP6;
 		}

 		if (proto == IPPROTO_TCP)


@@ -99,10 +99,10 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
 		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
 		otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
 		offset = mbox->rx_start + msg->next_msgoff;
+		if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
+			__otx2_mbox_reset(mbox, 0);
 		mdev->msgs_acked++;
 	}
-
-	otx2_mbox_reset(mbox, 0);
 }

 static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,

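All three mailbox hunks above apply the same ordering rule: the shared mailbox area may only be recycled while handling the last message, before the final msgs_acked increment publishes the acknowledgement; resetting after the loop let the other side start writing while messages were still being read. A minimal userspace sketch of that ordering, with all names illustrative stand-ins rather than the octeontx2 API:

#include <stdio.h>

/* Stand-in for the shared mailbox state. */
struct ring { int msgs_acked; };

static void process(int msg)           { printf("processed msg %d\n", msg); }
static void ring_reset(struct ring *r) { (void)r; printf("ring reset\n"); }

/* Reset the shared area before the final ack becomes visible, so the
 * producer cannot overwrite entries that are still being read.
 */
static void consume_all(struct ring *r, int num_msgs)
{
	for (int i = 0; i < num_msgs; i++) {
		process(i);
		if (r->msgs_acked == num_msgs - 1)
			ring_reset(r);	/* last message: recycle first */
		r->msgs_acked++;	/* then publish the ack */
	}
}

int main(void)
{
	struct ring r = { 0 };
	consume_all(&r, 3);
	return 0;
}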

@@ -69,12 +69,10 @@ enum {
 	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR	= 0x10,
 };

-static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
-					   struct mlx5_cmd_msg *in,
-					   struct mlx5_cmd_msg *out,
-					   void *uout, int uout_size,
-					   mlx5_cmd_cbk_t cbk,
-					   void *context, int page_queue)
+static struct mlx5_cmd_work_ent *
+cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
+	      struct mlx5_cmd_msg *out, void *uout, int uout_size,
+	      mlx5_cmd_cbk_t cbk, void *context, int page_queue)
 {
 	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
 	struct mlx5_cmd_work_ent *ent;
@@ -83,6 +81,7 @@ static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
 	if (!ent)
 		return ERR_PTR(-ENOMEM);

+	ent->idx	= -EINVAL;
 	ent->in		= in;
 	ent->out	= out;
 	ent->uout	= uout;
@@ -91,10 +90,16 @@ static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
 	ent->context	= context;
 	ent->cmd	= cmd;
 	ent->page_queue = page_queue;
+	refcount_set(&ent->refcnt, 1);

 	return ent;
 }

+static void cmd_free_ent(struct mlx5_cmd_work_ent *ent)
+{
+	kfree(ent);
+}
+
 static u8 alloc_token(struct mlx5_cmd *cmd)
 {
 	u8 token;
@@ -109,7 +114,7 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
 	return token;
 }

-static int alloc_ent(struct mlx5_cmd *cmd)
+static int cmd_alloc_index(struct mlx5_cmd *cmd)
 {
 	unsigned long flags;
 	int ret;
@@ -123,7 +128,7 @@ static int alloc_ent(struct mlx5_cmd *cmd)
 	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
 }

-static void free_ent(struct mlx5_cmd *cmd, int idx)
+static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
 {
 	unsigned long flags;
@@ -132,6 +137,22 @@ static void free_ent(struct mlx5_cmd *cmd, int idx)
 	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 }

+static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
+{
+	refcount_inc(&ent->refcnt);
+}
+
+static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
+{
+	if (!refcount_dec_and_test(&ent->refcnt))
+		return;
+
+	if (ent->idx >= 0)
+		cmd_free_index(ent->cmd, ent->idx);
+
+	cmd_free_ent(ent);
+}
+
 static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
 {
 	return cmd->cmd_buf + (idx << cmd->log_stride);
@@ -219,11 +240,6 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
 	ent->ret = -ETIMEDOUT;
 }

-static void free_cmd(struct mlx5_cmd_work_ent *ent)
-{
-	kfree(ent);
-}
-
 static int verify_signature(struct mlx5_cmd_work_ent *ent)
 {
 	struct mlx5_cmd_mailbox *next = ent->out->next;
@@ -837,11 +853,22 @@ static void cb_timeout_handler(struct work_struct *work)
 	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
 						 cmd);

+	mlx5_cmd_eq_recover(dev);
+
+	/* Maybe got handled by eq recover ? */
+	if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
+		mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
+			       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
+		goto out; /* phew, already handled */
+	}
+
 	ent->ret = -ETIMEDOUT;
-	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
-		       mlx5_command_str(msg_to_opcode(ent->in)),
-		       msg_to_opcode(ent->in));
+	mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n", ent->idx,
+		       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
 	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+
+out:
+	cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */
 }

 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
@@ -856,6 +883,32 @@ static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
 	return cmd->allowed_opcode == opcode;
 }

+static int cmd_alloc_index_retry(struct mlx5_cmd *cmd)
+{
+	unsigned long alloc_end = jiffies + msecs_to_jiffies(1000);
+	int idx;
+
+retry:
+	idx = cmd_alloc_index(cmd);
+	if (idx < 0 && time_before(jiffies, alloc_end)) {
+		/* Index allocation can fail on heavy load of commands. This is a temporary
+		 * situation as the current command already holds the semaphore, meaning that
+		 * another command completion is being handled and it is expected to release
+		 * the entry index soon.
+		 */
+		cpu_relax();
+		goto retry;
+	}
+	return idx;
+}
+
+bool mlx5_cmd_is_down(struct mlx5_core_dev *dev)
+{
+	return pci_channel_offline(dev->pdev) ||
+	       dev->cmd.state != MLX5_CMDIF_STATE_UP ||
+	       dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR;
+}
+
 static void cmd_work_handler(struct work_struct *work)
 {
 	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
@@ -873,14 +926,14 @@ static void cmd_work_handler(struct work_struct *work)
 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 	down(sem);
 	if (!ent->page_queue) {
-		alloc_ret = alloc_ent(cmd);
+		alloc_ret = cmd_alloc_index_retry(cmd);
 		if (alloc_ret < 0) {
 			mlx5_core_err_rl(dev, "failed to allocate command entry\n");
 			if (ent->callback) {
 				ent->callback(-EAGAIN, ent->context);
 				mlx5_free_cmd_msg(dev, ent->out);
 				free_msg(dev, ent->in);
-				free_cmd(ent);
+				cmd_ent_put(ent);
 			} else {
 				ent->ret = -EAGAIN;
 				complete(&ent->done);
@@ -916,15 +969,12 @@ static void cmd_work_handler(struct work_struct *work)
 	ent->ts1 = ktime_get_ns();
 	cmd_mode = cmd->mode;

-	if (ent->callback)
-		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+	if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
+		cmd_ent_get(ent);
 	set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);

 	/* Skip sending command to fw if internal error */
-	if (pci_channel_offline(dev->pdev) ||
-	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
-	    cmd->state != MLX5_CMDIF_STATE_UP ||
-	    !opcode_allowed(&dev->cmd, ent->op)) {
+	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
 		u8 status = 0;
 		u32 drv_synd;
@@ -933,13 +983,10 @@ static void cmd_work_handler(struct work_struct *work)
 		MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);

 		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
-		/* no doorbell, no need to keep the entry */
-		free_ent(cmd, ent->idx);
-		if (ent->callback)
-			free_cmd(ent);
 		return;
 	}

+	cmd_ent_get(ent); /* for the _real_ FW event on completion */
 	/* ring doorbell after the descriptor is valid */
 	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
 	wmb();
@@ -983,6 +1030,35 @@ static const char *deliv_status_to_str(u8 status)
 	}
 }

+enum {
+	MLX5_CMD_TIMEOUT_RECOVER_MSEC = 5 * 1000,
+};
+
+static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
+					  struct mlx5_cmd_work_ent *ent)
+{
+	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_RECOVER_MSEC);
+
+	mlx5_cmd_eq_recover(dev);
+
+	/* Re-wait on the ent->done after executing the recovery flow. If the
+	 * recovery flow (or any other recovery flow running simultaneously)
+	 * has recovered an EQE, it should cause the entry to be completed by
+	 * the command interface.
+	 */
+	if (wait_for_completion_timeout(&ent->done, timeout)) {
+		mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
+			       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
+		return;
+	}
+
+	mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
+		       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
+
+	ent->ret = -ETIMEDOUT;
+	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+}
+
 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 {
 	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
@@ -994,12 +1070,10 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 		ent->ret = -ECANCELED;
 		goto out_err;
 	}
-	if (cmd->mode == CMD_MODE_POLLING || ent->polling) {
+	if (cmd->mode == CMD_MODE_POLLING || ent->polling)
 		wait_for_completion(&ent->done);
-	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
-		ent->ret = -ETIMEDOUT;
-		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
-	}
+	else if (!wait_for_completion_timeout(&ent->done, timeout))
+		wait_func_handle_exec_timeout(dev, ent);

 out_err:
 	err = ent->ret;
@@ -1039,11 +1113,16 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 	if (callback && page_queue)
 		return -EINVAL;

-	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
-			page_queue);
+	ent = cmd_alloc_ent(cmd, in, out, uout, uout_size,
+			    callback, context, page_queue);
 	if (IS_ERR(ent))
 		return PTR_ERR(ent);

+	/* put for this ent is when consumed, depending on the use case
+	 * 1) (!callback) blocking flow: by caller after wait_func completes
+	 * 2) (callback) flow: by mlx5_cmd_comp_handler() when ent is handled
+	 */
+
 	ent->token = token;
 	ent->polling = force_polling;
@@ -1062,12 +1141,10 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 	}

 	if (callback)
-		goto out;
+		goto out; /* mlx5_cmd_comp_handler() will put(ent) */

 	err = wait_func(dev, ent);
-	if (err == -ETIMEDOUT)
-		goto out;
-	if (err == -ECANCELED)
+	if (err == -ETIMEDOUT || err == -ECANCELED)
 		goto out_free;

 	ds = ent->ts2 - ent->ts1;
@@ -1085,7 +1162,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 	*status = ent->status;

 out_free:
-	free_cmd(ent);
+	cmd_ent_put(ent);
 out:
 	return err;
 }
@@ -1516,14 +1593,19 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
 			if (!forced) {
 				mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
 					      ent->idx);
-				free_ent(cmd, ent->idx);
-				free_cmd(ent);
+				cmd_ent_put(ent);
 			}
 			continue;
 		}

-		if (ent->callback)
-			cancel_delayed_work(&ent->cb_timeout_work);
+		if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work))
+			cmd_ent_put(ent); /* timeout work was canceled */
+
+		if (!forced || /* Real FW completion */
+		    pci_channel_offline(dev->pdev) || /* FW is inaccessible */
+		    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+			cmd_ent_put(ent);
+
 		if (ent->page_queue)
 			sem = &cmd->pages_sem;
 		else
@@ -1545,10 +1627,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
 				ent->ret, deliv_status_to_str(ent->status), ent->status);
 		}

-		/* only real completion will free the entry slot */
-		if (!forced)
-			free_ent(cmd, ent->idx);
-
 		if (ent->callback) {
 			ds = ent->ts2 - ent->ts1;
 			if (ent->op < MLX5_CMD_OP_MAX) {
@@ -1576,10 +1654,13 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
 				free_msg(dev, ent->in);

 				err = err ? err : ent->status;
-				if (!forced)
-					free_cmd(ent);
+				/* final consumer is done, release ent */
+				cmd_ent_put(ent);
 				callback(err, context);
 			} else {
+				/* release wait_func() so mlx5_cmd_invoke()
+				 * can make the final ent_put()
+				 */
 				complete(&ent->done);
 			}
 			up(sem);
@@ -1589,8 +1670,11 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force

 void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
 {
+	struct mlx5_cmd *cmd = &dev->cmd;
+	unsigned long bitmask;
 	unsigned long flags;
 	u64 vector;
+	int i;

 	/* wait for pending handlers to complete */
 	mlx5_eq_synchronize_cmd_irq(dev);
@@ -1599,11 +1683,20 @@ void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
 	if (!vector)
 		goto no_trig;

+	bitmask = vector;
+	/* we must increment the allocated entries refcount before triggering the completions
+	 * to guarantee pending commands will not get freed in the meanwhile.
+	 * For that reason, it also has to be done inside the alloc_lock.
+	 */
+	for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
+		cmd_ent_get(cmd->ent_arr[i]);
 	vector |= MLX5_TRIGGERED_CMD_COMP;
 	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

 	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
 	mlx5_cmd_comp_handler(dev, vector, true);
+	for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
+		cmd_ent_put(cmd->ent_arr[i]);
 	return;

 no_trig:
@@ -1711,10 +1804,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 	u8 token;

 	opcode = MLX5_GET(mbox_in, in, opcode);
-	if (pci_channel_offline(dev->pdev) ||
-	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
-	    dev->cmd.state != MLX5_CMDIF_STATE_UP ||
-	    !opcode_allowed(&dev->cmd, opcode)) {
+	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) {
 		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
 		MLX5_SET(mbox_out, out, status, status);
 		MLX5_SET(mbox_out, out, syndrome, drv_synd);

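The mlx5 command-interface hunks above replace ad-hoc free_ent()/free_cmd() calls with a single refcounted lifetime: every consumer that can outlive the submitter (the real FW completion, the scheduled timeout work, the triggered-completions path) holds its own reference, and only the last put releases the entry and its index. A minimal userspace model of that scheme, using C11 atomics as a stand-in for the kernel's refcount_t (all names here are illustrative, not the mlx5 API):

#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct ent {
	atomic_int refcnt;
	int idx;
};

static struct ent *ent_alloc(void)
{
	struct ent *e = malloc(sizeof(*e));

	atomic_init(&e->refcnt, 1);	/* submitter's reference */
	e->idx = -1;			/* mirrors ent->idx = -EINVAL */
	return e;
}

static void ent_get(struct ent *e) { atomic_fetch_add(&e->refcnt, 1); }

static void ent_put(struct ent *e)
{
	if (atomic_fetch_sub(&e->refcnt, 1) != 1)
		return;
	if (e->idx >= 0)
		printf("free index %d\n", e->idx);	/* cmd_free_index() */
	free(e);					/* cmd_free_ent() */
}

int main(void)
{
	struct ent *e = ent_alloc();

	e->idx = 5;
	ent_get(e);	/* for the real FW event on completion */
	ent_get(e);	/* for the scheduled timeout work */
	ent_put(e);	/* timeout work canceled */
	ent_put(e);	/* completion handled */
	ent_put(e);	/* submitter done: last put frees entry and index */
	return 0;
}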

@@ -91,7 +91,12 @@ struct page_pool;
 #define MLX5_MPWRQ_PAGES_PER_WQE	BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)

 #define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
-#define MLX5E_REQUIRED_WQE_MTTS		(ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
+/* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
+ * WQEs, This page will absorb write overflow by the hardware, when
+ * receiving packets larger than MTU. These oversize packets are
+ * dropped by the driver at a later stage.
+ */
+#define MLX5E_REQUIRED_WQE_MTTS		(ALIGN(MLX5_MPWRQ_PAGES_PER_WQE + 1, 8))
 #define MLX5E_LOG_ALIGNED_MPWQE_PPW	(ilog2(MLX5E_REQUIRED_WQE_MTTS))
 #define MLX5E_REQUIRED_MTTS(wqes)	(wqes * MLX5E_REQUIRED_WQE_MTTS)
 #define MLX5E_MAX_RQ_NUM_MTTS	\
@@ -617,6 +622,7 @@ struct mlx5e_rq {
 	u32                    rqn;
 	struct mlx5_core_dev  *mdev;
 	struct mlx5_core_mkey  umr_mkey;
+	struct mlx5e_dma_info  wqe_overflow;

 	/* XDP read-mostly */
 	struct xdp_rxq_info    xdp_rxq;
View File
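To make the macro change concrete: assuming a hypothetical MLX5_MPWRQ_PAGES_PER_WQE of 8 (the real value derives from MLX5_MPWRQ_WQE_PAGE_ORDER), the old macro gave ALIGN(8, 8) = 8 MTTs per WQE and left no gap, while the new one gives ALIGN(9, 8) = 16, so consecutive WQEs are separated by MTTs that stay mapped to the overflow page. A self-contained check:

#include <stdio.h>

/* Same rounding as the kernel's ALIGN(x, a) for power-of-two a. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Illustrative value only; not taken from the driver. */
	unsigned int pages_per_wqe = 8;

	printf("without overflow page: %u MTTs per WQE\n",
	       ALIGN(pages_per_wqe, 8));
	printf("with overflow page:    %u MTTs per WQE\n",
	       ALIGN(pages_per_wqe + 1, 8));
	return 0;
}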

@@ -569,6 +569,9 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy)
 	if (fec_policy >= (1 << MLX5E_FEC_LLRS_272_257_1) && !fec_50g_per_lane)
 		return -EOPNOTSUPP;

+	if (fec_policy && !mlx5e_fec_in_caps(dev, fec_policy))
+		return -EOPNOTSUPP;
+
 	MLX5_SET(pplm_reg, in, local_port, 1);
 	err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
 	if (err)


@@ -110,11 +110,25 @@ static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
 	rtnl_unlock();
 }

+struct neigh_update_work {
+	struct work_struct work;
+	struct neighbour *n;
+	struct mlx5e_neigh_hash_entry *nhe;
+};
+
+static void mlx5e_release_neigh_update_work(struct neigh_update_work *update_work)
+{
+	neigh_release(update_work->n);
+	mlx5e_rep_neigh_entry_release(update_work->nhe);
+	kfree(update_work);
+}
+
 static void mlx5e_rep_neigh_update(struct work_struct *work)
 {
-	struct mlx5e_neigh_hash_entry *nhe =
-		container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
-	struct neighbour *n = nhe->n;
+	struct neigh_update_work *update_work = container_of(work, struct neigh_update_work,
+							     work);
+	struct mlx5e_neigh_hash_entry *nhe = update_work->nhe;
+	struct neighbour *n = update_work->n;
 	struct mlx5e_encap_entry *e;
 	unsigned char ha[ETH_ALEN];
 	struct mlx5e_priv *priv;
@@ -146,30 +160,42 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
 			mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
 		mlx5e_encap_put(priv, e);
 	}
-	mlx5e_rep_neigh_entry_release(nhe);
 	rtnl_unlock();
-	neigh_release(n);
+	mlx5e_release_neigh_update_work(update_work);
 }

-static void mlx5e_rep_queue_neigh_update_work(struct mlx5e_priv *priv,
-					      struct mlx5e_neigh_hash_entry *nhe,
-					      struct neighbour *n)
+static struct neigh_update_work *mlx5e_alloc_neigh_update_work(struct mlx5e_priv *priv,
+							       struct neighbour *n)
 {
-	/* Take a reference to ensure the neighbour and mlx5 encap
-	 * entry won't be destructed until we drop the reference in
-	 * delayed work.
-	 */
-	neigh_hold(n);
-
-	/* This assignment is valid as long as the the neigh reference
-	 * is taken
-	 */
-	nhe->n = n;
-
-	if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
-		mlx5e_rep_neigh_entry_release(nhe);
-		neigh_release(n);
+	struct neigh_update_work *update_work;
+	struct mlx5e_neigh_hash_entry *nhe;
+	struct mlx5e_neigh m_neigh = {};
+
+	update_work = kzalloc(sizeof(*update_work), GFP_ATOMIC);
+	if (WARN_ON(!update_work))
+		return NULL;
+
+	m_neigh.dev = n->dev;
+	m_neigh.family = n->ops->family;
+	memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+
+	/* Obtain reference to nhe as last step in order not to release it in
+	 * atomic context.
+	 */
+	rcu_read_lock();
+	nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
+	rcu_read_unlock();
+	if (!nhe) {
+		kfree(update_work);
+		return NULL;
 	}
+
+	INIT_WORK(&update_work->work, mlx5e_rep_neigh_update);
+	neigh_hold(n);
+	update_work->n = n;
+	update_work->nhe = nhe;
+
+	return update_work;
 }

 static int mlx5e_rep_netevent_event(struct notifier_block *nb,
@@ -181,7 +207,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
 		struct net_device *netdev = rpriv->netdev;
 		struct mlx5e_priv *priv = netdev_priv(netdev);
 		struct mlx5e_neigh_hash_entry *nhe = NULL;
-		struct mlx5e_neigh m_neigh = {};
+		struct neigh_update_work *update_work;
 		struct neigh_parms *p;
 		struct neighbour *n;
 		bool found = false;
@@ -196,17 +222,11 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
 #endif
 			return NOTIFY_DONE;

-		m_neigh.dev = n->dev;
-		m_neigh.family = n->ops->family;
-		memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
-
-		rcu_read_lock();
-		nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
-		rcu_read_unlock();
-		if (!nhe)
+		update_work = mlx5e_alloc_neigh_update_work(priv, n);
+		if (!update_work)
 			return NOTIFY_DONE;

-		mlx5e_rep_queue_neigh_update_work(priv, nhe, n);
+		queue_work(priv->wq, &update_work->work);
 		break;

 	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
@@ -352,7 +372,6 @@ int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,

 	(*nhe)->priv = priv;
 	memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
-	INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
 	spin_lock_init(&(*nhe)->encap_list_lock);
 	INIT_LIST_HEAD(&(*nhe)->encap_list);
 	refcount_set(&(*nhe)->refcnt, 1);

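The race being fixed above comes from stashing the neighbour pointer in a field of a shared hash entry: two concurrent netevents could overwrite nhe->n before the single embedded work ran. The fix allocates one work item per event that owns its own references. A userspace model of that per-event-context pattern (names are illustrative only, not the mlx5e API):

#include <stdio.h>
#include <stdlib.h>

struct update_work {
	int neigh_id;	/* stands in for the held neighbour reference */
	int entry_id;	/* stands in for the held hash-entry reference */
};

static struct update_work *alloc_update_work(int neigh_id, int entry_id)
{
	struct update_work *uw = malloc(sizeof(*uw));

	if (!uw)
		return NULL;
	uw->neigh_id = neigh_id;	/* references taken here */
	uw->entry_id = entry_id;
	return uw;
}

static void run_update_work(struct update_work *uw)
{
	printf("update neigh %d / entry %d\n", uw->neigh_id, uw->entry_id);
	free(uw);	/* references dropped together with the work item */
}

int main(void)
{
	/* Two "concurrent" events no longer fight over one shared ->n
	 * field: each carries its own snapshot. */
	run_update_work(alloc_update_work(1, 10));
	run_update_work(alloc_update_work(2, 10));
	return 0;
}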

@@ -246,8 +246,10 @@ mlx5_tc_ct_rule_to_tuple_nat(struct mlx5_ct_tuple *tuple,
 	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
 		ip6_offset = (offset - offsetof(struct ipv6hdr, saddr));
 		ip6_offset /= 4;
-		if (ip6_offset < 8)
+		if (ip6_offset < 4)
 			tuple->ip.src_v6.s6_addr32[ip6_offset] = cpu_to_be32(val);
+		else if (ip6_offset < 8)
+			tuple->ip.dst_v6.s6_addr32[ip6_offset - 4] = cpu_to_be32(val);
 		else
 			return -EOPNOTSUPP;
 		break;

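The bug above is an indexing one: an IPv6 header stores saddr at bytes 8..23 and daddr at bytes 24..39, so after dividing the byte offset (relative to saddr) by 4, word indices 0..3 belong to the source address and 4..7 to the destination; the old code wrote all eight into src_v6. A self-contained walk-through of the index math, with a local stand-in for struct ipv6hdr:

#include <stdio.h>
#include <stddef.h>

struct ipv6hdr_model {
	unsigned char misc[8];		/* version..hop_limit */
	unsigned int saddr[4];
	unsigned int daddr[4];
};

int main(void)
{
	for (size_t off = offsetof(struct ipv6hdr_model, saddr);
	     off < sizeof(struct ipv6hdr_model); off += 4) {
		size_t idx = (off - offsetof(struct ipv6hdr_model, saddr)) / 4;

		if (idx < 4)
			printf("byte offset %2zu -> src word %zu\n", off, idx);
		else
			printf("byte offset %2zu -> dst word %zu\n", off, idx - 4);
	}
	return 0;
}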

@@ -217,6 +217,9 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 		break;
 	}

+	if (WARN_ONCE(*rule_p, "VLAN rule already exists type %d", rule_type))
+		return 0;
+
 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

 	if (IS_ERR(*rule_p)) {
@@ -397,8 +400,7 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
 	for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
 		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

-	if (priv->fs.vlan.cvlan_filter_disabled &&
-	    !(priv->netdev->flags & IFF_PROMISC))
+	if (priv->fs.vlan.cvlan_filter_disabled)
 		mlx5e_add_any_vid_rules(priv);
 }

@@ -415,8 +417,12 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
 	for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

-	if (priv->fs.vlan.cvlan_filter_disabled &&
-	    !(priv->netdev->flags & IFF_PROMISC))
+	WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
+
+	/* must be called after DESTROY bit is set and
+	 * set_rx_mode is called and flushed
+	 */
+	if (priv->fs.vlan.cvlan_filter_disabled)
 		mlx5e_del_any_vid_rules(priv);
 }


@@ -246,12 +246,17 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,

 static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
 				 u64 npages, u8 page_shift,
-				 struct mlx5_core_mkey *umr_mkey)
+				 struct mlx5_core_mkey *umr_mkey,
+				 dma_addr_t filler_addr)
 {
-	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+	struct mlx5_mtt *mtt;
+	int inlen;
 	void *mkc;
 	u32 *in;
 	int err;
+	int i;
+
+	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*mtt) * npages;

 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
@@ -271,6 +276,18 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
 	MLX5_SET(mkc, mkc, translations_octword_size,
 		 MLX5_MTT_OCTW(npages));
 	MLX5_SET(mkc, mkc, log_page_size, page_shift);
+	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+		 MLX5_MTT_OCTW(npages));
+
+	/* Initialize the mkey with all MTTs pointing to a default
+	 * page (filler_addr). When the channels are activated, UMR
+	 * WQEs will redirect the RX WQEs to the actual memory from
+	 * the RQ's pool, while the gaps (wqe_overflow) remain mapped
+	 * to the default page.
+	 */
+	mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+	for (i = 0 ; i < npages ; i++)
+		mtt[i].ptag = cpu_to_be64(filler_addr);

 	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
@@ -282,7 +299,8 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
 {
 	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));

-	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
+	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey,
+				     rq->wqe_overflow.addr);
 }

 static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
@@ -350,6 +368,28 @@ static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
 	mlx5e_reporter_rq_cqe_err(rq);
 }

+static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
+{
+	rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
+	if (!rq->wqe_overflow.page)
+		return -ENOMEM;
+
+	rq->wqe_overflow.addr = dma_map_page(rq->pdev, rq->wqe_overflow.page, 0,
+					     PAGE_SIZE, rq->buff.map_dir);
+	if (dma_mapping_error(rq->pdev, rq->wqe_overflow.addr)) {
+		__free_page(rq->wqe_overflow.page);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
+{
+	dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, PAGE_SIZE,
+		       rq->buff.map_dir);
+	__free_page(rq->wqe_overflow.page);
+}
+
 static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 			  struct mlx5e_params *params,
 			  struct mlx5e_xsk_param *xsk,
@@ -396,7 +436,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 		rq_xdp_ix += params->num_channels * MLX5E_RQ_GROUP_XSK;
 	err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix);
 	if (err < 0)
-		goto err_rq_wq_destroy;
+		goto err_rq_xdp_prog;

 	rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
 	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
@@ -406,6 +446,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
 					&rq->wq_ctrl);
+		if (err)
+			goto err_rq_xdp;
+
+		err = mlx5e_alloc_mpwqe_rq_drop_page(rq);
 		if (err)
 			goto err_rq_wq_destroy;
@@ -424,18 +468,18 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,

 		err = mlx5e_create_rq_umr_mkey(mdev, rq);
 		if (err)
-			goto err_rq_wq_destroy;
+			goto err_rq_drop_page;
 		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

 		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
 		if (err)
-			goto err_free;
+			goto err_rq_mkey;
 		break;
 	default: /* MLX5_WQ_TYPE_CYCLIC */
 		err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
 					 &rq->wq_ctrl);
 		if (err)
-			goto err_rq_wq_destroy;
+			goto err_rq_xdp;

 		rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
@@ -450,19 +494,19 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 				      GFP_KERNEL, cpu_to_node(c->cpu));
 		if (!rq->wqe.frags) {
 			err = -ENOMEM;
-			goto err_free;
+			goto err_rq_wq_destroy;
 		}

 		err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
 		if (err)
-			goto err_free;
+			goto err_rq_frags;

 		rq->mkey_be = c->mkey_be;
 	}

 	err = mlx5e_rq_set_handlers(rq, params, xsk);
 	if (err)
-		goto err_free;
+		goto err_free_by_rq_type;

 	if (xsk) {
 		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
@@ -486,13 +530,13 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 		if (IS_ERR(rq->page_pool)) {
 			err = PTR_ERR(rq->page_pool);
 			rq->page_pool = NULL;
-			goto err_free;
+			goto err_free_by_rq_type;
 		}
 		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
 						 MEM_TYPE_PAGE_POOL, rq->page_pool);
 	}
 	if (err)
-		goto err_free;
+		goto err_free_by_rq_type;

 	for (i = 0; i < wq_sz; i++) {
 		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
@@ -542,23 +586,27 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,

 	return 0;

-err_free:
+err_free_by_rq_type:
 	switch (rq->wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		kvfree(rq->mpwqe.info);
+err_rq_mkey:
 		mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
+err_rq_drop_page:
+		mlx5e_free_mpwqe_rq_drop_page(rq);
 		break;
 	default: /* MLX5_WQ_TYPE_CYCLIC */
-		kvfree(rq->wqe.frags);
 		mlx5e_free_di_list(rq);
+err_rq_frags:
+		kvfree(rq->wqe.frags);
 	}
 err_rq_wq_destroy:
+	mlx5_wq_destroy(&rq->wq_ctrl);
+err_rq_xdp:
+	xdp_rxq_info_unreg(&rq->xdp_rxq);
+err_rq_xdp_prog:
 	if (params->xdp_prog)
 		bpf_prog_put(params->xdp_prog);
-	xdp_rxq_info_unreg(&rq->xdp_rxq);
-	page_pool_destroy(rq->page_pool);
-	mlx5_wq_destroy(&rq->wq_ctrl);

 	return err;
 }
@@ -580,6 +628,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		kvfree(rq->mpwqe.info);
 		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
+		mlx5e_free_mpwqe_rq_drop_page(rq);
 		break;
 	default: /* MLX5_WQ_TYPE_CYCLIC */
 		kvfree(rq->wqe.frags);
@@ -4177,6 +4226,21 @@ int mlx5e_get_vf_stats(struct net_device *dev,
 }
 #endif

+static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev,
+							   struct sk_buff *skb)
+{
+	switch (skb->inner_protocol) {
+	case htons(ETH_P_IP):
+	case htons(ETH_P_IPV6):
+	case htons(ETH_P_TEB):
+		return true;
+	case htons(ETH_P_MPLS_UC):
+	case htons(ETH_P_MPLS_MC):
+		return MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre);
+	}
+	return false;
+}
+
 static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
 						     struct sk_buff *skb,
 						     netdev_features_t features)
@@ -4199,7 +4263,9 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,

 	switch (proto) {
 	case IPPROTO_GRE:
-		return features;
+		if (mlx5e_gre_tunnel_inner_proto_offload_supported(priv->mdev, skb))
+			return features;
+		break;
 	case IPPROTO_IPIP:
 	case IPPROTO_IPV6:
 		if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP))


@@ -135,12 +135,6 @@ struct mlx5e_neigh_hash_entry {
 	/* encap list sharing the same neigh */
 	struct list_head encap_list;

-	/* valid only when the neigh reference is taken during
-	 * neigh_update_work workqueue callback.
-	 */
-	struct neighbour *n;
-	struct work_struct neigh_update_work;
-
 	/* neigh hash entry can be deleted only when the refcount is zero.
 	 * refcount is needed to avoid neigh hash entry removal by TC, while
 	 * it's used by the neigh notification call.


@@ -189,6 +189,29 @@ u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
 	return count_eqe;
 }

+static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, unsigned long *flags)
+	__acquires(&eq->lock)
+{
+	if (in_irq())
+		spin_lock(&eq->lock);
+	else
+		spin_lock_irqsave(&eq->lock, *flags);
+}
+
+static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, unsigned long *flags)
+	__releases(&eq->lock)
+{
+	if (in_irq())
+		spin_unlock(&eq->lock);
+	else
+		spin_unlock_irqrestore(&eq->lock, *flags);
+}
+
+enum async_eq_nb_action {
+	ASYNC_EQ_IRQ_HANDLER = 0,
+	ASYNC_EQ_RECOVER = 1,
+};
+
 static int mlx5_eq_async_int(struct notifier_block *nb,
 			     unsigned long action, void *data)
 {
@@ -198,11 +221,14 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
 	struct mlx5_eq_table *eqt;
 	struct mlx5_core_dev *dev;
 	struct mlx5_eqe *eqe;
+	unsigned long flags;
 	int num_eqes = 0;

 	dev = eq->dev;
 	eqt = dev->priv.eq_table;

+	mlx5_eq_async_int_lock(eq_async, &flags);
+
 	eqe = next_eqe_sw(eq);
 	if (!eqe)
 		goto out;
@@ -223,8 +249,19 @@ static int mlx5_eq_async_int(struct notifier_block *nb,

 out:
 	eq_update_ci(eq, 1);
+	mlx5_eq_async_int_unlock(eq_async, &flags);

-	return 0;
+	return unlikely(action == ASYNC_EQ_RECOVER) ? num_eqes : 0;
+}
+
+void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev)
+{
+	struct mlx5_eq_async *eq = &dev->priv.eq_table->cmd_eq;
+	int eqes;
+
+	eqes = mlx5_eq_async_int(&eq->irq_nb, ASYNC_EQ_RECOVER, NULL);
+	if (eqes)
+		mlx5_core_warn(dev, "Recovered %d EQEs on cmd_eq\n", eqes);
 }

 static void init_eq_buf(struct mlx5_eq *eq)
@@ -569,6 +606,7 @@ setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
 	int err;

 	eq->irq_nb.notifier_call = mlx5_eq_async_int;
+	spin_lock_init(&eq->lock);

 	err = create_async_eq(dev, &eq->core, param);
 	if (err) {
@@ -656,8 +694,10 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev)
 	cleanup_async_eq(dev, &table->pages_eq, "pages");
 	cleanup_async_eq(dev, &table->async_eq, "async");

+	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ);
 	mlx5_cmd_use_polling(dev);
 	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
+	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);

 	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
 }

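The lock helpers above exist because the same handler now runs from two contexts: the hard-IRQ notifier (interrupts already disabled, plain spin_lock is enough) and the command-timeout recovery path in process context (which must use the irqsave variant so the IRQ handler cannot preempt it inside the critical section). A trivially runnable model of that branch, with printouts standing in for the spinlock calls:

#include <stdbool.h>
#include <stdio.h>

static void eq_lock(bool in_hard_irq)
{
	if (in_hard_irq)
		printf("spin_lock(&eq->lock)\n");		/* IRQs already off */
	else
		printf("spin_lock_irqsave(&eq->lock, flags)\n");/* mask IRQs first */
}

int main(void)
{
	eq_lock(true);	/* interrupt-handler path */
	eq_lock(false);	/* mlx5_cmd_eq_recover() polling path */
	return 0;
}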

@@ -37,6 +37,7 @@ struct mlx5_eq {
 struct mlx5_eq_async {
 	struct mlx5_eq          core;
 	struct notifier_block   irq_nb;
+	spinlock_t              lock; /* To avoid irq EQ handle races with resiliency flows */
 };

 struct mlx5_eq_comp {
@@ -81,6 +82,7 @@ void mlx5_cq_tasklet_cb(unsigned long data);
 struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix);

 u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq);
+void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev);
 void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev);
 void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev);


@@ -432,7 +432,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
 	u32 npages;
 	u32 i = 0;

-	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+	if (!mlx5_cmd_is_down(dev))
 		return mlx5_cmd_exec(dev, in, in_size, out, out_size);

 	/* No hard feelings, we want our pages back! */


@@ -115,7 +115,7 @@ static int request_irqs(struct mlx5_core_dev *dev, int nvec)
 	return 0;

 err_request_irq:
-	for (; i >= 0; i--) {
+	while (i--) {
 		struct mlx5_irq *irq = mlx5_irq_get(dev, i);
 		int irqn = pci_irq_vector(dev->pdev, i);


@@ -3690,13 +3690,13 @@ bool mlxsw_sp_port_dev_check(const struct net_device *dev)
 	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
 }

-static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
+static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
+				   struct netdev_nested_priv *priv)
 {
-	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
 	int ret = 0;

 	if (mlxsw_sp_port_dev_check(lower_dev)) {
-		*p_mlxsw_sp_port = netdev_priv(lower_dev);
+		priv->data = (void *)netdev_priv(lower_dev);
 		ret = 1;
 	}

@@ -3705,15 +3705,16 @@ static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)

 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
 {
-	struct mlxsw_sp_port *mlxsw_sp_port;
+	struct netdev_nested_priv priv = {
+		.data = NULL,
+	};

 	if (mlxsw_sp_port_dev_check(dev))
 		return netdev_priv(dev);

-	mlxsw_sp_port = NULL;
-	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
+	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);

-	return mlxsw_sp_port;
+	return (struct mlxsw_sp_port *)priv.data;
 }

 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
@@ -3726,16 +3727,17 @@ struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)

 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
 {
-	struct mlxsw_sp_port *mlxsw_sp_port;
+	struct netdev_nested_priv priv = {
+		.data = NULL,
+	};

 	if (mlxsw_sp_port_dev_check(dev))
 		return netdev_priv(dev);

-	mlxsw_sp_port = NULL;
 	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
-				      &mlxsw_sp_port);
+				      &priv);

-	return mlxsw_sp_port;
+	return (struct mlxsw_sp_port *)priv.data;
 }

 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)

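This and the rocker/spectrum_router/spectrum_switchdev hunks are mechanical conversions to the new walker-callback contract: the callback receives a small netdev_nested_priv wrapper instead of a raw void *, and results travel through priv->data. A self-contained userspace model of the shape of that contract (local stand-in types, not the kernel headers):

#include <stdio.h>

struct netdev_nested_priv { void *data; };

struct net_device { const char *name; int is_port; };

static int lower_dev_walk(struct net_device *lower,
			  struct netdev_nested_priv *priv)
{
	if (lower->is_port) {
		priv->data = (void *)lower;	/* report the match */
		return 1;			/* non-zero stops the walk */
	}
	return 0;
}

int main(void)
{
	struct net_device devs[] = { { "bond0", 0 }, { "sw0p1", 1 } };
	struct netdev_nested_priv priv = { .data = NULL };

	for (unsigned int i = 0; i < 2 && !lower_dev_walk(&devs[i], &priv); i++)
		;
	if (priv.data)
		printf("found %s\n", ((struct net_device *)priv.data)->name);
	return 0;
}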

@@ -292,13 +292,14 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
 	int err;

 	group->tcam = tcam;
-	mutex_init(&group->lock);
 	INIT_LIST_HEAD(&group->region_list);

 	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
 	if (err)
 		return err;

+	mutex_init(&group->lock);
+
 	return 0;
 }


@@ -7351,9 +7351,10 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
 	return err;
 }

-static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data)
+static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
+					struct netdev_nested_priv *priv)
 {
-	struct mlxsw_sp_rif *rif = data;
+	struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;

 	if (!netif_is_macvlan(dev))
 		return 0;
@@ -7364,12 +7365,16 @@ static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data)

 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
 {
+	struct netdev_nested_priv priv = {
+		.data = (void *)rif,
+	};
+
 	if (!netif_is_macvlan_port(rif->dev))
 		return 0;

 	netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
 	return netdev_walk_all_upper_dev_rcu(rif->dev,
-					     __mlxsw_sp_rif_macvlan_flush, rif);
+					     __mlxsw_sp_rif_macvlan_flush, &priv);
 }

 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,


@@ -136,9 +136,9 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
 }

 static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
-						    void *data)
+						    struct netdev_nested_priv *priv)
 {
-	struct mlxsw_sp *mlxsw_sp = data;
+	struct mlxsw_sp *mlxsw_sp = priv->data;

 	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
 	return 0;
@@ -147,10 +147,14 @@ static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
 static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
 						struct net_device *dev)
 {
+	struct netdev_nested_priv priv = {
+		.data = (void *)mlxsw_sp,
+	};
+
 	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
 	netdev_walk_all_upper_dev_rcu(dev,
 				      mlxsw_sp_bridge_device_upper_rif_destroy,
-				      mlxsw_sp);
+				      &priv);
 }

 static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge,


@@ -2239,14 +2239,10 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
 	default:
 		break;
 	}
-
-	clk_disable_unprepare(tp->clk);
 }

 static void rtl_pll_power_up(struct rtl8169_private *tp)
 {
-	clk_prepare_enable(tp->clk);
-
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
 	case RTL_GIGA_MAC_VER_37:
@@ -2904,7 +2900,7 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
 		{ 0x08, 0x0001, 0x0002 },
 		{ 0x09, 0x0000, 0x0080 },
 		{ 0x19, 0x0000, 0x0224 },
-		{ 0x00, 0x0000, 0x0004 },
+		{ 0x00, 0x0000, 0x0008 },
 		{ 0x0c, 0x3df0, 0x0200 },
 	};
@@ -2921,7 +2917,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
 		{ 0x06, 0x00c0, 0x0020 },
 		{ 0x0f, 0xffff, 0x5200 },
 		{ 0x19, 0x0000, 0x0224 },
-		{ 0x00, 0x0000, 0x0004 },
+		{ 0x00, 0x0000, 0x0008 },
 		{ 0x0c, 0x3df0, 0x0200 },
 	};
@@ -4826,21 +4822,8 @@ static void rtl8169_net_suspend(struct rtl8169_private *tp)

 #ifdef CONFIG_PM

-static int __maybe_unused rtl8169_suspend(struct device *device)
+static int rtl8169_net_resume(struct rtl8169_private *tp)
 {
-	struct rtl8169_private *tp = dev_get_drvdata(device);
-
-	rtnl_lock();
-	rtl8169_net_suspend(tp);
-	rtnl_unlock();
-
-	return 0;
-}
-
-static int rtl8169_resume(struct device *device)
-{
-	struct rtl8169_private *tp = dev_get_drvdata(device);
-
 	rtl_rar_set(tp, tp->dev->dev_addr);

 	if (tp->TxDescArray)
@@ -4851,6 +4834,33 @@ static int rtl8169_resume(struct device *device)
 	return 0;
 }

+static int __maybe_unused rtl8169_suspend(struct device *device)
+{
+	struct rtl8169_private *tp = dev_get_drvdata(device);
+
+	rtnl_lock();
+	rtl8169_net_suspend(tp);
+	if (!device_may_wakeup(tp_to_dev(tp)))
+		clk_disable_unprepare(tp->clk);
+	rtnl_unlock();
+
+	return 0;
+}
+
+static int __maybe_unused rtl8169_resume(struct device *device)
+{
+	struct rtl8169_private *tp = dev_get_drvdata(device);
+
+	if (!device_may_wakeup(tp_to_dev(tp)))
+		clk_prepare_enable(tp->clk);
+
+	/* Reportedly at least Asus X453MA truncates packets otherwise */
+	if (tp->mac_version == RTL_GIGA_MAC_VER_37)
+		rtl_init_rxcfg(tp);
+
+	return rtl8169_net_resume(tp);
+}
+
 static int rtl8169_runtime_suspend(struct device *device)
 {
 	struct rtl8169_private *tp = dev_get_drvdata(device);
@@ -4874,7 +4884,7 @@ static int rtl8169_runtime_resume(struct device *device)

 	__rtl8169_set_wol(tp, tp->saved_wolopts);

-	return rtl8169_resume(device);
+	return rtl8169_net_resume(tp);
 }

 static int rtl8169_runtime_idle(struct device *device)

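The r8169 change moves clock gating out of PLL power-down/up into system suspend/resume, and gates it only when the device is not a wakeup source, since Wake-on-LAN needs the clock running. A trivially runnable model of that conditional gating (the helpers stand in for device_may_wakeup() and the clk_* calls):

#include <stdbool.h>
#include <stdio.h>

static bool may_wakeup; /* stand-in for device_may_wakeup() */

static void suspend(void)
{
	printf("net_suspend\n");
	if (!may_wakeup)
		printf("clk_disable_unprepare\n");	/* safe: no WoL pending */
}

static void resume(void)
{
	if (!may_wakeup)
		printf("clk_prepare_enable\n");		/* re-enable before use */
	printf("net_resume\n");
}

int main(void)
{
	suspend();
	resume();
	return 0;
}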

@@ -1342,51 +1342,6 @@ static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
 	return error;
 }

-/* MDIO bus init function */
-static int ravb_mdio_init(struct ravb_private *priv)
-{
-	struct platform_device *pdev = priv->pdev;
-	struct device *dev = &pdev->dev;
-	int error;
-
-	/* Bitbang init */
-	priv->mdiobb.ops = &bb_ops;
-
-	/* MII controller setting */
-	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
-	if (!priv->mii_bus)
-		return -ENOMEM;
-
-	/* Hook up MII support for ethtool */
-	priv->mii_bus->name = "ravb_mii";
-	priv->mii_bus->parent = dev;
-	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
-		 pdev->name, pdev->id);
-
-	/* Register MDIO bus */
-	error = of_mdiobus_register(priv->mii_bus, dev->of_node);
-	if (error)
-		goto out_free_bus;
-
-	return 0;
-
-out_free_bus:
-	free_mdio_bitbang(priv->mii_bus);
-	return error;
-}
-
-/* MDIO bus release function */
-static int ravb_mdio_release(struct ravb_private *priv)
-{
-	/* Unregister mdio bus */
-	mdiobus_unregister(priv->mii_bus);
-
-	/* Free bitbang info */
-	free_mdio_bitbang(priv->mii_bus);
-
-	return 0;
-}
-
 /* Network device open function for Ethernet AVB */
 static int ravb_open(struct net_device *ndev)
 {
@@ -1395,13 +1350,6 @@ static int ravb_open(struct net_device *ndev)
 	struct device *dev = &pdev->dev;
 	int error;

-	/* MDIO bus init */
-	error = ravb_mdio_init(priv);
-	if (error) {
-		netdev_err(ndev, "failed to initialize MDIO\n");
-		return error;
-	}
-
 	napi_enable(&priv->napi[RAVB_BE]);
 	napi_enable(&priv->napi[RAVB_NC]);
@@ -1479,7 +1427,6 @@ out_free_irq:
 out_napi_off:
 	napi_disable(&priv->napi[RAVB_NC]);
 	napi_disable(&priv->napi[RAVB_BE]);
-	ravb_mdio_release(priv);
 	return error;
 }
@@ -1789,8 +1736,6 @@ static int ravb_close(struct net_device *ndev)
 	ravb_ring_free(ndev, RAVB_BE);
 	ravb_ring_free(ndev, RAVB_NC);

-	ravb_mdio_release(priv);
-
 	return 0;
 }
@@ -1942,6 +1887,51 @@ static const struct net_device_ops ravb_netdev_ops = {
 	.ndo_set_features	= ravb_set_features,
 };

+/* MDIO bus init function */
+static int ravb_mdio_init(struct ravb_private *priv)
+{
+	struct platform_device *pdev = priv->pdev;
+	struct device *dev = &pdev->dev;
+	int error;
+
+	/* Bitbang init */
+	priv->mdiobb.ops = &bb_ops;
+
+	/* MII controller setting */
+	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
+	if (!priv->mii_bus)
+		return -ENOMEM;
+
+	/* Hook up MII support for ethtool */
+	priv->mii_bus->name = "ravb_mii";
+	priv->mii_bus->parent = dev;
+	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		 pdev->name, pdev->id);
+
+	/* Register MDIO bus */
+	error = of_mdiobus_register(priv->mii_bus, dev->of_node);
+	if (error)
+		goto out_free_bus;
+
+	return 0;
+
+out_free_bus:
+	free_mdio_bitbang(priv->mii_bus);
+	return error;
+}
+
+/* MDIO bus release function */
+static int ravb_mdio_release(struct ravb_private *priv)
+{
+	/* Unregister mdio bus */
+	mdiobus_unregister(priv->mii_bus);
+
+	/* Free bitbang info */
+	free_mdio_bitbang(priv->mii_bus);
+
+	return 0;
+}
+
 static const struct of_device_id ravb_match_table[] = {
 	{ .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
 	{ .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
@@ -2184,6 +2174,13 @@ static int ravb_probe(struct platform_device *pdev)
 		eth_hw_addr_random(ndev);
 	}

+	/* MDIO bus init */
+	error = ravb_mdio_init(priv);
+	if (error) {
+		dev_err(&pdev->dev, "failed to initialize MDIO\n");
+		goto out_dma_free;
+	}
+
 	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
 	netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
@@ -2205,6 +2202,8 @@ static int ravb_probe(struct platform_device *pdev)
 out_napi_del:
 	netif_napi_del(&priv->napi[RAVB_NC]);
 	netif_napi_del(&priv->napi[RAVB_BE]);
+	ravb_mdio_release(priv);
+out_dma_free:
 	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
 			  priv->desc_bat_dma);
@@ -2236,6 +2235,7 @@ static int ravb_remove(struct platform_device *pdev)
 	unregister_netdev(ndev);
 	netif_napi_del(&priv->napi[RAVB_NC]);
 	netif_napi_del(&priv->napi[RAVB_BE]);
+	ravb_mdio_release(priv);
 	pm_runtime_disable(&pdev->dev);
 	free_netdev(ndev);
 	platform_set_drvdata(pdev, NULL);

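The ravb hunks move the unchanged MDIO init/release functions from open/close to probe/remove: anything userspace can reach once the netdev is registered must already exist at registration time and may only be torn down after unregistration. A minimal runnable sketch of that lifetime pairing (function names are stand-ins, and printf marks the ordering):

#include <stdio.h>

static int mdio_init(void)     { printf("mdio_init\n");  return 0; }
static void mdio_release(void) { printf("mdio_release\n"); }

static int driver_probe(void)
{
	if (mdio_init())
		return -1;
	printf("register_netdev\n");	/* the bus must already exist here */
	return 0;
}

static void driver_remove(void)
{
	printf("unregister_netdev\n");
	mdio_release();			/* only after nothing can reach it */
}

int main(void)
{
	if (!driver_probe())
		driver_remove();
	return 0;
}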

@@ -3099,9 +3099,10 @@ struct rocker_walk_data {
 	struct rocker_port *port;
 };

-static int rocker_lower_dev_walk(struct net_device *lower_dev, void *_data)
+static int rocker_lower_dev_walk(struct net_device *lower_dev,
+				 struct netdev_nested_priv *priv)
 {
-	struct rocker_walk_data *data = _data;
+	struct rocker_walk_data *data = (struct rocker_walk_data *)priv->data;
 	int ret = 0;

 	if (rocker_port_dev_check_under(lower_dev, data->rocker)) {
@@ -3115,6 +3116,7 @@ static int rocker_lower_dev_walk(struct net_device *lower_dev, void *_data)
 struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev,
 					       struct rocker *rocker)
 {
+	struct netdev_nested_priv priv;
 	struct rocker_walk_data data;

 	if (rocker_port_dev_check_under(dev, rocker))
@@ -3122,7 +3124,8 @@ struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev,

 	data.rocker = rocker;
 	data.port = NULL;
-	netdev_walk_all_lower_dev(dev, rocker_lower_dev_walk, &data);
+	priv.data = (void *)&data;
+	netdev_walk_all_lower_dev(dev, rocker_lower_dev_walk, &priv);

 	return data.port;
 }


@@ -653,7 +653,6 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)

 	pci_free_irq_vectors(pdev);

-	clk_disable_unprepare(priv->plat->stmmac_clk);
 	clk_unregister_fixed_rate(priv->plat->stmmac_clk);

 	pcim_iounmap_regions(pdev, BIT(0));


@@ -203,6 +203,8 @@ struct stmmac_priv {
 	int eee_enabled;
 	int eee_active;
 	int tx_lpi_timer;
+	int tx_lpi_enabled;
+	int eee_tw_timer;
 	unsigned int mode;
 	unsigned int chain_mode;
 	int extend_desc;


@@ -665,6 +665,7 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev,
 	edata->eee_enabled = priv->eee_enabled;
 	edata->eee_active = priv->eee_active;
 	edata->tx_lpi_timer = priv->tx_lpi_timer;
+	edata->tx_lpi_enabled = priv->tx_lpi_enabled;

 	return phylink_ethtool_get_eee(priv->phylink, edata);
 }
@@ -675,24 +676,26 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
 	struct stmmac_priv *priv = netdev_priv(dev);
 	int ret;

-	if (!edata->eee_enabled) {
+	if (!priv->dma_cap.eee)
+		return -EOPNOTSUPP;
+
+	if (priv->tx_lpi_enabled != edata->tx_lpi_enabled)
+		netdev_warn(priv->dev,
+			    "Setting EEE tx-lpi is not supported\n");
+
+	if (!edata->eee_enabled)
 		stmmac_disable_eee_mode(priv);
-	} else {
-		/* We are asking for enabling the EEE but it is safe
-		 * to verify all by invoking the eee_init function.
-		 * In case of failure it will return an error.
-		 */
-		edata->eee_enabled = stmmac_eee_init(priv);
-		if (!edata->eee_enabled)
-			return -EOPNOTSUPP;
-	}

 	ret = phylink_ethtool_set_eee(priv->phylink, edata);
 	if (ret)
 		return ret;

-	priv->eee_enabled = edata->eee_enabled;
-	priv->tx_lpi_timer = edata->tx_lpi_timer;
+	if (edata->eee_enabled &&
+	    priv->tx_lpi_timer != edata->tx_lpi_timer) {
+		priv->tx_lpi_timer = edata->tx_lpi_timer;
+		stmmac_eee_init(priv);
+	}
+
 	return 0;
 }


@@ -94,7 +94,7 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 module_param(eee_timer, int, 0644);
 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
-#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
+#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
 /* By default the driver will use the ring mode to manage tx and rx descriptors,
  * but allow user to force to use the chain instead of the ring
@@ -370,7 +370,7 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
 	stmmac_enable_eee_mode(priv);
-	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
+	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
 }
 /**
@@ -383,7 +383,7 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
  */
 bool stmmac_eee_init(struct stmmac_priv *priv)
 {
-	int tx_lpi_timer = priv->tx_lpi_timer;
+	int eee_tw_timer = priv->eee_tw_timer;
 	/* Using PCS we cannot dial with the phy registers at this stage
 	 * so we do not support extra feature like EEE.
@@ -403,7 +403,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 		if (priv->eee_enabled) {
 			netdev_dbg(priv->dev, "disable EEE\n");
 			del_timer_sync(&priv->eee_ctrl_timer);
-			stmmac_set_eee_timer(priv, priv->hw, 0, tx_lpi_timer);
+			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
 		}
 		mutex_unlock(&priv->lock);
 		return false;
@@ -411,11 +411,12 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 	if (priv->eee_active && !priv->eee_enabled) {
 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
-		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
-				     tx_lpi_timer);
+				     eee_tw_timer);
 	}
+	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
 	mutex_unlock(&priv->lock);
 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
 	return true;
@@ -930,6 +931,7 @@ static void stmmac_mac_link_down(struct phylink_config *config,
 	stmmac_mac_set(priv, priv->ioaddr, false);
 	priv->eee_active = false;
+	priv->tx_lpi_enabled = false;
 	stmmac_eee_init(priv);
 	stmmac_set_eee_pls(priv, priv->hw, false);
 }
@@ -1027,6 +1029,7 @@ static void stmmac_mac_link_up(struct phylink_config *config,
 	if (phy && priv->dma_cap.eee) {
 		priv->eee_active = phy_init_eee(phy, 1) >= 0;
 		priv->eee_enabled = stmmac_eee_init(priv);
+		priv->tx_lpi_enabled = priv->eee_enabled;
 		stmmac_set_eee_pls(priv, priv->hw, true);
 	}
 }
@@ -2061,7 +2064,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
 		stmmac_enable_eee_mode(priv);
-		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
+		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
 	}
 	/* We still have pending packets, let's call for a new scheduling */
@@ -2694,7 +2697,11 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 			netdev_warn(priv->dev, "PTP init failed\n");
 	}
-	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
+	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
+
+	/* Convert the timer from msec to usec */
+	if (!priv->tx_lpi_timer)
+		priv->tx_lpi_timer = eee_timer * 1000;
 	if (priv->use_riwt) {
 		if (!priv->rx_riwt)

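Note on the hunks above: STMMAC_LPI_T() now converts from microseconds while the eee_timer module parameter stays in milliseconds, hence the "eee_timer * 1000" default. A minimal userspace sketch of that conversion (the default value used here is an assumption for the demo, not taken from this diff):

#include <stdio.h>

#define DEMO_DEFAULT_LPI_TIMER_MS 1000	/* assumed module-param default, msec */

int main(void)
{
	int eee_timer = DEMO_DEFAULT_LPI_TIMER_MS;
	int tx_lpi_timer = 0;			/* not configured via ethtool */

	/* mirrors the fallback added in stmmac_hw_setup(): msec -> usec */
	if (!tx_lpi_timer)
		tx_lpi_timer = eee_timer * 1000;

	printf("tx_lpi_timer = %d usec\n", tx_lpi_timer);
	return 0;
}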

@@ -2,7 +2,7 @@
 /*
 	Written 1998-2001 by Donald Becker.
-	Current Maintainer: Roger Luethi <rl@hellgate.ch>
+	Current Maintainer: Kevin Brace <kevinbrace@bracecomputerlab.com>
 	This software may be used and distributed according to the terms of
 	the GNU General Public License (GPL), incorporated herein by reference.
@@ -32,8 +32,6 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #define DRV_NAME	"via-rhine"
-#define DRV_VERSION	"1.5.1"
-#define DRV_RELDATE	"2010-10-09"
 #include <linux/types.h>
@@ -117,10 +115,6 @@ static const int multicast_filter_limit = 32;
 #include <linux/uaccess.h>
 #include <linux/dmi.h>
-/* These identify the driver base version and may not be removed. */
-static const char version[] =
-	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
 MODULE_LICENSE("GPL");
@@ -243,7 +237,7 @@ enum rhine_revs {
 	VT8233		= 0x60,	/* Integrated MAC */
 	VT8235		= 0x74,	/* Integrated MAC */
 	VT8237		= 0x78,	/* Integrated MAC */
-	VTunknown1	= 0x7C,
+	VT8251		= 0x7C,	/* Integrated MAC */
 	VT6105		= 0x80,
 	VT6105_B0	= 0x83,
 	VT6105L		= 0x8A,
@@ -1051,11 +1045,6 @@ static int rhine_init_one_pci(struct pci_dev *pdev,
 	u32 quirks = 0;
 #endif
-	/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
-	pr_info_once("%s\n", version);
-#endif
 	rc = pci_enable_device(pdev);
 	if (rc)
 		goto err_out;
@@ -1706,6 +1695,8 @@ static int rhine_open(struct net_device *dev)
 		goto out_free_ring;
 	alloc_tbufs(dev);
+	enable_mmio(rp->pioaddr, rp->quirks);
+	rhine_power_init(dev);
 	rhine_chip_reset(dev);
 	rhine_task_enable(rp);
 	init_registers(dev);
@@ -2294,7 +2285,6 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
 	struct device *hwdev = dev->dev.parent;
 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
-	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
 }
@@ -2616,9 +2606,6 @@ static int __init rhine_init(void)
 	int ret_pci, ret_platform;
 	/* when a module, this is printed whether or not devices are found in probe */
-#ifdef MODULE
-	pr_info("%s\n", version);
-#endif
 	if (dmi_check_system(rhine_dmi_table)) {
 		/* these BIOSes fail at PXE boot if chip is in D3 */
 		avoid_D3 = true;


@@ -222,6 +222,7 @@ config MDIO_THUNDER
 	depends on 64BIT
 	depends on PCI
 	select MDIO_CAVIUM
+	select MDIO_DEVRES
 	help
 	  This driver supports the MDIO interfaces found on Cavium
 	  ThunderX SoCs when the MDIO bus device appears as a PCI


@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0+
-/*
- * drivers/net/phy/realtek.c
+/* drivers/net/phy/realtek.c
  *
  * Driver for Realtek PHYs
  *
@@ -32,9 +31,9 @@
 #define RTL8211F_TX_DELAY			BIT(8)
 #define RTL8211F_RX_DELAY			BIT(3)
-#define RTL8211E_TX_DELAY			BIT(1)
-#define RTL8211E_RX_DELAY			BIT(2)
-#define RTL8211E_MODE_MII_GMII			BIT(3)
+#define RTL8211E_CTRL_DELAY			BIT(13)
+#define RTL8211E_TX_DELAY			BIT(12)
+#define RTL8211E_RX_DELAY			BIT(11)
 #define RTL8201F_ISR				0x1e
 #define RTL8201F_IER				0x13
@@ -246,16 +245,16 @@ static int rtl8211e_config_init(struct phy_device *phydev)
 	/* enable TX/RX delay for rgmii-* modes, and disable them for rgmii. */
 	switch (phydev->interface) {
 	case PHY_INTERFACE_MODE_RGMII:
-		val = 0;
+		val = RTL8211E_CTRL_DELAY | 0;
 		break;
 	case PHY_INTERFACE_MODE_RGMII_ID:
-		val = RTL8211E_TX_DELAY | RTL8211E_RX_DELAY;
+		val = RTL8211E_CTRL_DELAY | RTL8211E_TX_DELAY | RTL8211E_RX_DELAY;
 		break;
 	case PHY_INTERFACE_MODE_RGMII_RXID:
-		val = RTL8211E_RX_DELAY;
+		val = RTL8211E_CTRL_DELAY | RTL8211E_RX_DELAY;
 		break;
 	case PHY_INTERFACE_MODE_RGMII_TXID:
-		val = RTL8211E_TX_DELAY;
+		val = RTL8211E_CTRL_DELAY | RTL8211E_TX_DELAY;
 		break;
 	default: /* the rest of the modes imply leaving delays as is. */
 		return 0;
@@ -263,11 +262,12 @@ static int rtl8211e_config_init(struct phy_device *phydev)
 	/* According to a sample driver there is a 0x1c config register on the
 	 * 0xa4 extension page (0x7) layout. It can be used to disable/enable
-	 * the RX/TX delays otherwise controlled by RXDLY/TXDLY pins. It can
-	 * also be used to customize the whole configuration register:
-	 * 8:6 = PHY Address, 5:4 = Auto-Negotiation, 3 = Interface Mode Select,
-	 * 2 = RX Delay, 1 = TX Delay, 0 = SELRGV (see original PHY datasheet
-	 * for details).
+	 * the RX/TX delays otherwise controlled by RXDLY/TXDLY pins.
+	 * The configuration register definition:
+	 * 14 = reserved
+	 * 13 = Force Tx RX Delay controlled by bit12 bit11,
+	 * 12 = RX Delay, 11 = TX Delay
+	 * 10:0 = Test && debug settings reserved by realtek
 	 */
 	oldpage = phy_select_page(phydev, 0x7);
 	if (oldpage < 0)
@@ -277,7 +277,8 @@ static int rtl8211e_config_init(struct phy_device *phydev)
 	if (ret)
 		goto err_restore_page;
-	ret = __phy_modify(phydev, 0x1c, RTL8211E_TX_DELAY | RTL8211E_RX_DELAY,
+	ret = __phy_modify(phydev, 0x1c, RTL8211E_CTRL_DELAY
+			   | RTL8211E_TX_DELAY | RTL8211E_RX_DELAY,
 			   val);
 err_restore_page:

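The hunk above uses the paged-access idiom for Realtek PHYs. A hedged sketch of that pattern, with the page/register numbers taken from the diff but the function name hypothetical; the essential point is that phy_restore_page() is reached on every path:

/* Sketch, assuming the phy_select_page()/__phy_modify()/phy_restore_page()
 * flow shown in the diff; demo_set_delays() is a made-up name. */
static int demo_set_delays(struct phy_device *phydev, u16 val)
{
	int ret = 0, oldpage;

	oldpage = phy_select_page(phydev, 0x7);	/* extension page 7 */
	if (oldpage < 0)
		goto err_restore_page;

	/* update only the delay-control bits of paged register 0x1c */
	ret = __phy_modify(phydev, 0x1c, RTL8211E_CTRL_DELAY
			   | RTL8211E_TX_DELAY | RTL8211E_RX_DELAY, val);

err_restore_page:
	/* always restores the previous page and folds errors together */
	return phy_restore_page(phydev, oldpage, ret);
}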

@@ -287,7 +287,7 @@ inst_rollback:
 	for (i--; i >= 0; i--)
 		__team_option_inst_del_option(team, dst_opts[i]);
-	i = option_count - 1;
+	i = option_count;
 alloc_rollback:
 	for (i--; i >= 0; i--)
 		kfree(dst_opts[i]);
@@ -2112,6 +2112,7 @@ static void team_setup_by_port(struct net_device *dev,
 	dev->header_ops	= port_dev->header_ops;
 	dev->type = port_dev->type;
 	dev->hard_header_len = port_dev->hard_header_len;
+	dev->needed_headroom = port_dev->needed_headroom;
 	dev->addr_len = port_dev->addr_len;
 	dev->mtu = port_dev->mtu;
 	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);

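The rollback fix above is an off-by-one: since the cleanup loop pre-decrements, the index must start at the element count, not count minus one, or the last allocated object leaks. A minimal userspace illustration (all names here are invented for the demo):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	enum { COUNT = 4 };
	char *objs[COUNT];
	int i;

	for (i = 0; i < COUNT; i++)
		objs[i] = malloc(16);

	i = COUNT;	/* not COUNT - 1: the loop below decrements first */
	for (i--; i >= 0; i--) {
		printf("freeing objs[%d]\n", i);
		free(objs[i]);	/* with COUNT - 1, objs[COUNT - 1] would leak */
	}
	return 0;
}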

@@ -1823,6 +1823,33 @@ static const struct driver_info belkin_info = {
 	.status = ax88179_status,
 	.link_reset = ax88179_link_reset,
 	.reset = ax88179_reset,
+	.stop = ax88179_stop,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.rx_fixup = ax88179_rx_fixup,
+	.tx_fixup = ax88179_tx_fixup,
+};
+
+static const struct driver_info toshiba_info = {
+	.description = "Toshiba USB Ethernet Adapter",
+	.bind = ax88179_bind,
+	.unbind = ax88179_unbind,
+	.status = ax88179_status,
+	.link_reset = ax88179_link_reset,
+	.reset = ax88179_reset,
+	.stop = ax88179_stop,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.rx_fixup = ax88179_rx_fixup,
+	.tx_fixup = ax88179_tx_fixup,
+};
+
+static const struct driver_info mct_info = {
+	.description = "MCT USB 3.0 Gigabit Ethernet Adapter",
+	.bind = ax88179_bind,
+	.unbind = ax88179_unbind,
+	.status = ax88179_status,
+	.link_reset = ax88179_link_reset,
+	.reset = ax88179_reset,
+	.stop = ax88179_stop,
 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
 	.rx_fixup = ax88179_rx_fixup,
 	.tx_fixup = ax88179_tx_fixup,
@@ -1861,6 +1888,14 @@ static const struct usb_device_id products[] = {
 	/* Belkin B2B128 USB 3.0 Hub + Gigabit Ethernet Adapter */
 	USB_DEVICE(0x050d, 0x0128),
 	.driver_info = (unsigned long)&belkin_info,
+}, {
+	/* Toshiba USB 3.0 GBit Ethernet Adapter */
+	USB_DEVICE(0x0930, 0x0a13),
+	.driver_info = (unsigned long)&toshiba_info,
+}, {
+	/* Magic Control Technology U3-A9003 USB 3.0 Gigabit Ethernet Adapter */
+	USB_DEVICE(0x0711, 0x0179),
+	.driver_info = (unsigned long)&mct_info,
 },
 	{ },
 };


@@ -360,28 +360,47 @@ fail:
 }
 #endif				/* PEGASUS_WRITE_EEPROM */
-static inline void get_node_id(pegasus_t *pegasus, __u8 *id)
+static inline int get_node_id(pegasus_t *pegasus, u8 *id)
 {
-	int i;
-	__u16 w16;
+	int i, ret;
+	u16 w16;
 	for (i = 0; i < 3; i++) {
-		read_eprom_word(pegasus, i, &w16);
+		ret = read_eprom_word(pegasus, i, &w16);
+		if (ret < 0)
+			return ret;
 		((__le16 *) id)[i] = cpu_to_le16(w16);
 	}
+
+	return 0;
 }
 static void set_ethernet_addr(pegasus_t *pegasus)
 {
-	__u8 node_id[6];
+	int ret;
+	u8 node_id[6];
 	if (pegasus->features & PEGASUS_II) {
-		get_registers(pegasus, 0x10, sizeof(node_id), node_id);
+		ret = get_registers(pegasus, 0x10, sizeof(node_id), node_id);
+		if (ret < 0)
+			goto err;
 	} else {
-		get_node_id(pegasus, node_id);
-		set_registers(pegasus, EthID, sizeof(node_id), node_id);
+		ret = get_node_id(pegasus, node_id);
+		if (ret < 0)
+			goto err;
+
+		ret = set_registers(pegasus, EthID, sizeof(node_id), node_id);
+		if (ret < 0)
+			goto err;
 	}
 	memcpy(pegasus->net->dev_addr, node_id, sizeof(node_id));
+
+	return;
+err:
+	eth_hw_addr_random(pegasus->net);
+	dev_info(&pegasus->intf->dev, "software assigned MAC address.\n");
+	return;
 }
 static inline int reset_mac(pegasus_t *pegasus)


@@ -63,6 +63,11 @@ static const unsigned long guest_offloads[] = {
 	VIRTIO_NET_F_GUEST_CSUM
 };
+#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
+				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
+				(1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
+				(1ULL << VIRTIO_NET_F_GUEST_UFO))
+
 struct virtnet_stat_desc {
 	char desc[ETH_GSTRING_LEN];
 	size_t offset;
@@ -2531,7 +2536,8 @@ static int virtnet_set_features(struct net_device *dev,
 		if (features & NETIF_F_LRO)
 			offloads = vi->guest_offloads_capable;
 		else
-			offloads = 0;
+			offloads = vi->guest_offloads_capable &
+				   ~GUEST_OFFLOAD_LRO_MASK;
 		err = virtnet_set_guest_offloads(vi, offloads);
 		if (err)

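The point of the virtio-net hunk above: when LRO is turned off, only the LRO-related guest offload bits are cleared, so unrelated offloads such as guest checksum survive. A userspace sketch of the masking (feature bit numbers follow the virtio spec values; the demo main() is invented):

#include <stdio.h>

#define VIRTIO_NET_F_GUEST_CSUM	1
#define VIRTIO_NET_F_GUEST_TSO4	7
#define VIRTIO_NET_F_GUEST_TSO6	8
#define VIRTIO_NET_F_GUEST_ECN	9
#define VIRTIO_NET_F_GUEST_UFO	10

#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO))

int main(void)
{
	unsigned long long capable =
		GUEST_OFFLOAD_LRO_MASK | (1ULL << VIRTIO_NET_F_GUEST_CSUM);

	/* old behaviour was "offloads = 0", dropping GUEST_CSUM too */
	unsigned long long offloads = capable & ~GUEST_OFFLOAD_LRO_MASK;

	printf("offloads = %#llx\n", offloads);	/* GUEST_CSUM bit survives */
	return 0;
}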

@@ -1032,7 +1032,6 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	/* Use temporary descriptor to avoid touching bits multiple times */
 	union Vmxnet3_GenericDesc tempTxDesc;
 #endif
-	struct udphdr *udph;
 	count = txd_estimate(skb);
@@ -1135,8 +1134,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 			gdesc->txd.om = VMXNET3_OM_ENCAP;
 			gdesc->txd.msscof = ctx.mss;
-			udph = udp_hdr(skb);
-			if (udph->check)
+			if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
 				gdesc->txd.oco = 1;
 		} else {
 			gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
@@ -3371,6 +3369,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	.ndo_change_mtu = vmxnet3_change_mtu,
 	.ndo_fix_features = vmxnet3_fix_features,
 	.ndo_set_features = vmxnet3_set_features,
+	.ndo_features_check = vmxnet3_features_check,
 	.ndo_get_stats64 = vmxnet3_get_stats64,
 	.ndo_tx_timeout = vmxnet3_tx_timeout,
 	.ndo_set_rx_mode = vmxnet3_set_mc,


@@ -267,6 +267,34 @@ netdev_features_t vmxnet3_fix_features(struct net_device *netdev,
 	return features;
 }
+netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
+					 struct net_device *netdev,
+					 netdev_features_t features)
+{
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+	/* Validate if the tunneled packet is being offloaded by the device */
+	if (VMXNET3_VERSION_GE_4(adapter) &&
+	    skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 l4_proto = 0;
+
+		switch (vlan_get_protocol(skb)) {
+		case htons(ETH_P_IP):
+			l4_proto = ip_hdr(skb)->protocol;
+			break;
+		case htons(ETH_P_IPV6):
+			l4_proto = ipv6_hdr(skb)->nexthdr;
+			break;
+		default:
+			return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+		}
+
+		if (l4_proto != IPPROTO_UDP)
+			return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+	}
+
+	return features;
+}
+
 static void vmxnet3_enable_encap_offloads(struct net_device *netdev)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

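For context, .ndo_features_check is the per-packet veto hook: the stack passes the features it would like to use for this skb, and the driver returns the subset it can actually honour. A bare-bones sketch of the callback shape (hypothetical demo function, not the vmxnet3 code):

static netdev_features_t demo_features_check(struct sk_buff *skb,
					     struct net_device *netdev,
					     netdev_features_t features)
{
	/* if this skb hits a case the hardware cannot offload,
	 * strip csum/GSO so the stack falls back to software */
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}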

@@ -470,6 +470,10 @@ vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
 netdev_features_t
 vmxnet3_fix_features(struct net_device *netdev, netdev_features_t features);
+netdev_features_t
+vmxnet3_features_check(struct sk_buff *skb,
+		       struct net_device *netdev, netdev_features_t features);
+
 int
 vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);


@@ -464,7 +464,6 @@ static int x25_asy_open(struct net_device *dev)
 {
 	struct x25_asy *sl = netdev_priv(dev);
 	unsigned long len;
-	int err;
 	if (sl->tty == NULL)
 		return -ENODEV;
@@ -490,14 +489,7 @@ static int x25_asy_open(struct net_device *dev)
 	sl->xleft    = 0;
 	sl->flags   &= (1 << SLF_INUSE);	/* Clear ESCAPE & ERROR flags */
-	netif_start_queue(dev);
-
-	/*
-	 *	Now attach LAPB
-	 */
-	err = lapb_register(dev, &x25_asy_callbacks);
-	if (err == LAPB_OK)
-		return 0;
+	return 0;
 	/* Cleanup */
 	kfree(sl->xbuff);
@@ -519,7 +511,6 @@ static int x25_asy_close(struct net_device *dev)
 	if (sl->tty)
 		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
-	netif_stop_queue(dev);
 	sl->rcount = 0;
 	sl->xleft  = 0;
 	spin_unlock(&sl->lock);
@@ -604,7 +595,6 @@ static int x25_asy_open_tty(struct tty_struct *tty)
 static void x25_asy_close_tty(struct tty_struct *tty)
 {
 	struct x25_asy *sl = tty->disc_data;
-	int err;
 	/* First make sure we're connected. */
 	if (!sl || sl->magic != X25_ASY_MAGIC)
@@ -615,11 +605,6 @@ static void x25_asy_close_tty(struct tty_struct *tty)
 	dev_close(sl->dev);
 	rtnl_unlock();
-	err = lapb_unregister(sl->dev);
-	if (err != LAPB_OK)
-		pr_err("%s: lapb_unregister error: %d\n",
-		       __func__, err);
 	tty->disc_data = NULL;
 	sl->tty = NULL;
 	x25_asy_free(sl);
@@ -722,15 +707,39 @@ static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
 static int x25_asy_open_dev(struct net_device *dev)
 {
+	int err;
 	struct x25_asy *sl = netdev_priv(dev);
 	if (sl->tty == NULL)
 		return -ENODEV;
+
+	err = lapb_register(dev, &x25_asy_callbacks);
+	if (err != LAPB_OK)
+		return -ENOMEM;
+
+	netif_start_queue(dev);
+
+	return 0;
+}
+
+static int x25_asy_close_dev(struct net_device *dev)
+{
+	int err;
+
+	netif_stop_queue(dev);
+
+	err = lapb_unregister(dev);
+	if (err != LAPB_OK)
+		pr_err("%s: lapb_unregister error: %d\n",
+		       __func__, err);
+
+	x25_asy_close(dev);
+
 	return 0;
 }
 static const struct net_device_ops x25_asy_netdev_ops = {
 	.ndo_open	= x25_asy_open_dev,
-	.ndo_stop	= x25_asy_close,
+	.ndo_stop	= x25_asy_close_dev,
 	.ndo_start_xmit	= x25_asy_xmit,
 	.ndo_tx_timeout	= x25_asy_timeout,
 	.ndo_change_mtu	= x25_asy_change_mtu,


@@ -460,7 +460,7 @@ void mt7615_init_device(struct mt7615_dev *dev)
 	dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
 	dev->mphy.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
 	dev->mphy.sband_5g.sband.vht_cap.cap |=
-			IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+			IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
 			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
 	mt7615_cap_dbdc_disable(dev);
 	dev->phy.dfs_state = -1;


@@ -671,9 +671,10 @@ bool qtnf_netdev_is_qtn(const struct net_device *ndev)
 	return ndev->netdev_ops == &qtnf_netdev_ops;
 }
-static int qtnf_check_br_ports(struct net_device *dev, void *data)
+static int qtnf_check_br_ports(struct net_device *dev,
+			       struct netdev_nested_priv *priv)
 {
-	struct net_device *ndev = data;
+	struct net_device *ndev = (struct net_device *)priv->data;
 	if (dev != ndev && netdev_port_same_parent_id(dev, ndev))
 		return -ENOTSUPP;
@@ -686,6 +687,9 @@ static int qtnf_core_netdevice_event(struct notifier_block *nb,
 {
 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
 	const struct netdev_notifier_changeupper_info *info;
+	struct netdev_nested_priv priv = {
+		.data = (void *)ndev,
+	};
 	struct net_device *brdev;
 	struct qtnf_vif *vif;
 	struct qtnf_bus *bus;
@@ -725,7 +729,7 @@ static int qtnf_core_netdevice_event(struct notifier_block *nb,
 		} else {
 			ret = netdev_walk_all_lower_dev(brdev,
 							qtnf_check_br_ports,
-							ndev);
+							&priv);
 		}
 		break;


@@ -913,12 +913,11 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 		else
 			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
-		/* can't zcopy slab pages */
-		if (unlikely(PageSlab(page))) {
-			ret = sock_no_sendpage(queue->sock, page, offset, len,
+		if (sendpage_ok(page)) {
+			ret = kernel_sendpage(queue->sock, page, offset, len,
 					flags);
 		} else {
-			ret = kernel_sendpage(queue->sock, page, offset, len,
+			ret = sock_no_sendpage(queue->sock, page, offset, len,
 					flags);
 		}
 		if (ret <= 0)


@@ -128,7 +128,7 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
 	 * coalescing neighboring slab objects into a single frag which
 	 * triggers one of hardened usercopy checks.
 	 */
-	if (!recv && page_count(sg_page(sg)) >= 1 && !PageSlab(sg_page(sg)))
+	if (!recv && sendpage_ok(sg_page(sg)))
 		return;
 	if (recv) {


@@ -661,7 +661,7 @@
 #define BTF							\
 	.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {			\
 		__start_BTF = .;				\
-		*(.BTF)						\
+		KEEP(*(.BTF))					\
 		__stop_BTF = .;					\
 	}							\
 	. = ALIGN(4);						\


@@ -767,6 +767,8 @@ struct mlx5_cmd_work_ent {
 	u64			ts2;
 	u16			op;
 	bool			polling;
+	/* Track the max comp handlers */
+	refcount_t		refcnt;
 };
 struct mlx5_pas {
@@ -933,6 +935,7 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
 			  void *out, int out_size);
 void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
+bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
 int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
 int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);

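The new refcnt field supports the "don't free command entries while a completion handler may still run" fix from this merge. A generic sketch of the refcount_t lifetime pattern it enables (this is not the mlx5 code; struct and function names are invented):

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_cmd_ent {
	refcount_t refcnt;
	/* ... payload ... */
};

/* every path that may still touch the entry holds a reference */
static void demo_cmd_ent_get(struct demo_cmd_ent *ent)
{
	refcount_inc(&ent->refcnt);
}

/* the memory is freed only when the last user drops its reference */
static void demo_cmd_ent_put(struct demo_cmd_ent *ent)
{
	if (refcount_dec_and_test(&ent->refcnt))
		kfree(ent);
}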

@@ -21,6 +21,7 @@
 #include <linux/rcupdate.h>
 #include <linux/once.h>
 #include <linux/fs.h>
+#include <linux/mm.h>
 #include <linux/sockptr.h>
 #include <uapi/linux/net.h>
@@ -286,6 +287,21 @@ do {								\
 #define net_get_random_once_wait(buf, nbytes)			\
 	get_random_once_wait((buf), (nbytes))
+/*
+ * E.g. XFS meta- & log-data is in slab pages, or bcache meta
+ * data pages, or other high order pages allocated by
+ * __get_free_pages() without __GFP_COMP, which have a page_count
+ * of 0 and/or have PageSlab() set. We cannot use send_page for
+ * those, as that does get_page(); put_page(); and would cause
+ * either a VM_BUG directly, or __page_cache_release a page that
+ * would actually still be referenced by someone, leading to some
+ * obscure delayed Oops somewhere else.
+ */
+static inline bool sendpage_ok(struct page *page)
+{
+	return !PageSlab(page) && page_count(page) >= 1;
+}
+
 int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
 		   size_t num, size_t len);
 int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,

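The intended call pattern for the new helper, mirroring the nvme-tcp, libceph and drbd conversions elsewhere in this merge: zero-copy a page only when sendpage_ok() says get_page()/put_page() on it is safe, otherwise fall back to a copying send. A hedged sketch (demo_send_page is a made-up wrapper, not a kernel API):

static int demo_send_page(struct socket *sock, struct page *page,
			  int offset, size_t len, int flags)
{
	if (sendpage_ok(page))
		return kernel_sendpage(sock, page, offset, len, flags);

	/* slab page or page_count == 0: copy through the socket instead */
	return sock_no_sendpage(sock, page, offset, len, flags);
}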

@@ -1851,6 +1851,11 @@ enum netdev_priv_flags {
 *	@udp_tunnel_nic:	UDP tunnel offload state
 *	@xdp_state:		stores info on attached XDP BPF programs
 *
+ *	@nested_level:	Used as as a parameter of spin_lock_nested() of
+ *			dev->addr_list_lock.
+ *	@unlink_list:	As netif_addr_lock() can be called recursively,
+ *			keep a list of interfaces to be deleted.
+ *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */
@@ -1955,6 +1960,7 @@ struct net_device {
 	unsigned short		type;
 	unsigned short		hard_header_len;
 	unsigned char		min_header_len;
+	unsigned char		name_assign_type;
 	unsigned short		needed_headroom;
 	unsigned short		needed_tailroom;
@@ -1965,21 +1971,28 @@ struct net_device {
 	unsigned char		addr_len;
 	unsigned char		upper_level;
 	unsigned char		lower_level;
 	unsigned short		neigh_priv_len;
 	unsigned short		dev_id;
 	unsigned short		dev_port;
 	spinlock_t		addr_list_lock;
-	unsigned char		name_assign_type;
-	bool			uc_promisc;
+
 	struct netdev_hw_addr_list	uc;
 	struct netdev_hw_addr_list	mc;
 	struct netdev_hw_addr_list	dev_addrs;
 #ifdef CONFIG_SYSFS
 	struct kset		*queues_kset;
+#endif
+#ifdef CONFIG_LOCKDEP
+	struct list_head	unlink_list;
 #endif
 	unsigned int		promiscuity;
 	unsigned int		allmulti;
+	bool			uc_promisc;
+#ifdef CONFIG_LOCKDEP
+	unsigned char		nested_level;
+#endif
 	/* Protocol-specific pointers */
@@ -4260,17 +4273,23 @@ static inline void netif_tx_disable(struct net_device *dev)
 static inline void netif_addr_lock(struct net_device *dev)
 {
-	spin_lock(&dev->addr_list_lock);
-}
+	unsigned char nest_level = 0;
-static inline void netif_addr_lock_nested(struct net_device *dev)
-{
-	spin_lock_nested(&dev->addr_list_lock, dev->lower_level);
+#ifdef CONFIG_LOCKDEP
+	nest_level = dev->nested_level;
+#endif
+	spin_lock_nested(&dev->addr_list_lock, nest_level);
 }
 static inline void netif_addr_lock_bh(struct net_device *dev)
 {
-	spin_lock_bh(&dev->addr_list_lock);
+	unsigned char nest_level = 0;
+
+#ifdef CONFIG_LOCKDEP
+	nest_level = dev->nested_level;
+#endif
+	local_bh_disable();
+	spin_lock_nested(&dev->addr_list_lock, nest_level);
 }
 static inline void netif_addr_unlock(struct net_device *dev)
@@ -4455,12 +4474,38 @@ extern int dev_rx_weight;
 extern int dev_tx_weight;
 extern int gro_normal_batch;
+enum {
+	NESTED_SYNC_IMM_BIT,
+	NESTED_SYNC_TODO_BIT,
+};
+
+#define __NESTED_SYNC_BIT(bit)	((u32)1 << (bit))
+#define __NESTED_SYNC(name)	__NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT)
+
+#define NESTED_SYNC_IMM		__NESTED_SYNC(IMM)
+#define NESTED_SYNC_TODO	__NESTED_SYNC(TODO)
+
+struct netdev_nested_priv {
+	unsigned char flags;
+	void *data;
+};
+
 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
 						 struct list_head **iter);
 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
 						     struct list_head **iter);
+
+#ifdef CONFIG_LOCKDEP
+static LIST_HEAD(net_unlink_list);
+
+static inline void net_unlink_todo(struct net_device *dev)
+{
+	if (list_empty(&dev->unlink_list))
+		list_add_tail(&dev->unlink_list, &net_unlink_list);
+}
+#endif
+
 /* iterate through upper list, must be called under RCU read lock */
 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
 	for (iter = &(dev)->adj_list.upper, \
@@ -4470,8 +4515,8 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
 				  int (*fn)(struct net_device *upper_dev,
-					    void *data),
-				  void *data);
+					    struct netdev_nested_priv *priv),
+				  struct netdev_nested_priv *priv);
 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
 				  struct net_device *upper_dev);
@@ -4508,12 +4553,12 @@ struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
 					     struct list_head **iter);
 int netdev_walk_all_lower_dev(struct net_device *dev,
 			      int (*fn)(struct net_device *lower_dev,
-					void *data),
-			      void *data);
+					struct netdev_nested_priv *priv),
+			      struct netdev_nested_priv *priv);
 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
 				  int (*fn)(struct net_device *lower_dev,
-					    void *data),
-				  void *data);
+					    struct netdev_nested_priv *priv),
+				  struct netdev_nested_priv *priv);
 void *netdev_adjacent_get_private(struct list_head *adj_list);
 void *netdev_lower_get_first_private_rcu(struct net_device *dev);

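Usage-wise, the walker conversion above means callers wrap their cookie in struct netdev_nested_priv instead of passing a bare void pointer. A hedged sketch of the converted API (the demo_* names are invented; the calls match the prototypes in this hunk):

/* callback: the caller's cookie now travels in priv->data */
static int demo_walk_cb(struct net_device *lower,
			struct netdev_nested_priv *priv)
{
	struct net_device *target = priv->data;

	return lower == target;	/* non-zero return stops the walk */
}

static bool demo_has_lower(struct net_device *dev, struct net_device *target)
{
	struct netdev_nested_priv priv = {
		.flags = 0,
		.data  = (void *)target,
	};

	return !!netdev_walk_all_lower_dev(dev, demo_walk_cb, &priv);
}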

@@ -166,8 +166,6 @@ int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
 			      struct nlattr *est, struct tc_action **a,
 			      const struct tc_action_ops *ops, int bind,
 			      u32 flags);
-void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
-
 void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
 int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
 			struct tc_action **a, int bind);


@@ -138,6 +138,7 @@ genl_dumpit_info(struct netlink_callback *cb)
 * @cmd: command identifier
 * @internal_flags: flags used by the family
 * @flags: flags
+ * @validate: validation flags from enum genl_validate_flags
 * @doit: standard command callback
 * @start: start callback for dumps
 * @dumpit: callback for dumpers


@@ -436,12 +436,18 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
 						    bool forwarding)
 {
 	struct net *net = dev_net(dst->dev);
+	unsigned int mtu;
 	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
 	    ip_mtu_locked(dst) ||
 	    !forwarding)
 		return dst_mtu(dst);
+	/* 'forwarding = true' case should always honour route mtu */
+	mtu = dst_metric_raw(dst, RTAX_MTU);
+	if (mtu)
+		return mtu;
+
 	return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
 }


@@ -1934,7 +1934,8 @@ void nla_get_range_signed(const struct nla_policy *pt,
 int netlink_policy_dump_start(const struct nla_policy *policy,
 			      unsigned int maxtype,
 			      unsigned long *state);
-bool netlink_policy_dump_loop(unsigned long *state);
+bool netlink_policy_dump_loop(unsigned long state);
 int netlink_policy_dump_write(struct sk_buff *skb, unsigned long state);
+void netlink_policy_dump_free(unsigned long state);
 #endif


@@ -1773,21 +1773,17 @@ static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_es
 static inline int xfrm_replay_clone(struct xfrm_state *x,
 				    struct xfrm_state *orig)
 {
-	x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn),
+	x->replay_esn = kmemdup(orig->replay_esn,
+				xfrm_replay_state_esn_len(orig->replay_esn),
 				GFP_KERNEL);
 	if (!x->replay_esn)
 		return -ENOMEM;
-
-	x->replay_esn->bmp_len = orig->replay_esn->bmp_len;
-	x->replay_esn->replay_window = orig->replay_esn->replay_window;
-
-	x->preplay_esn = kmemdup(x->replay_esn,
-				 xfrm_replay_state_esn_len(x->replay_esn),
+	x->preplay_esn = kmemdup(orig->preplay_esn,
+				 xfrm_replay_state_esn_len(orig->preplay_esn),
 				 GFP_KERNEL);
-	if (!x->preplay_esn) {
-		kfree(x->replay_esn);
+	if (!x->preplay_esn)
 		return -ENOMEM;
-	}
 	return 0;
 }

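The fix above replaces a zeroed allocation plus hand-copied header fields (which missed the replay bitmap, and cloned the wrong source for preplay) with a single whole-object duplication. A userspace analogue of the kmemdup() pattern for a flexible-array struct (all names invented for the demo):

#include <stdlib.h>
#include <string.h>

struct demo_replay_esn {
	unsigned int bmp_len;
	unsigned int replay_window;
	unsigned int bmp[];		/* flexible array, bmp_len words */
};

static size_t demo_replay_esn_len(const struct demo_replay_esn *r)
{
	return sizeof(*r) + r->bmp_len * sizeof(r->bmp[0]);
}

/* one call copies header and bitmap together, like kmemdup() */
static struct demo_replay_esn *
demo_replay_clone(const struct demo_replay_esn *orig)
{
	struct demo_replay_esn *copy = malloc(demo_replay_esn_len(orig));

	if (copy)
		memcpy(copy, orig, demo_replay_esn_len(orig));
	return copy;
}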

@@ -252,10 +252,10 @@
 #define ANA_SG_CONFIG_REG_3_LIST_LENGTH_M		GENMASK(18, 16)
 #define ANA_SG_CONFIG_REG_3_LIST_LENGTH_X(x)		(((x) & GENMASK(18, 16)) >> 16)
 #define ANA_SG_CONFIG_REG_3_GATE_ENABLE			BIT(20)
-#define ANA_SG_CONFIG_REG_3_INIT_IPS(x)			(((x) << 24) & GENMASK(27, 24))
-#define ANA_SG_CONFIG_REG_3_INIT_IPS_M			GENMASK(27, 24)
-#define ANA_SG_CONFIG_REG_3_INIT_IPS_X(x)		(((x) & GENMASK(27, 24)) >> 24)
-#define ANA_SG_CONFIG_REG_3_INIT_GATE_STATE		BIT(28)
+#define ANA_SG_CONFIG_REG_3_INIT_IPS(x)			(((x) << 21) & GENMASK(24, 21))
+#define ANA_SG_CONFIG_REG_3_INIT_IPS_M			GENMASK(24, 21)
+#define ANA_SG_CONFIG_REG_3_INIT_IPS_X(x)		(((x) & GENMASK(24, 21)) >> 21)
+#define ANA_SG_CONFIG_REG_3_INIT_GATE_STATE		BIT(25)
 #define ANA_SG_GCL_GS_CONFIG_RSZ			0x4

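The register fix above moves the INIT_IPS field down from bits 27:24 to bits 24:21. A small userspace check of the shift-and-mask encode/decode round trip using the corrected placement (the GENMASK definition here is a simplified 32-bit stand-in):

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

#define INIT_IPS(x)	(((x) << 21) & GENMASK(24, 21))
#define INIT_IPS_X(x)	(((x) & GENMASK(24, 21)) >> 21)

int main(void)
{
	uint32_t reg = INIT_IPS(0xA);	/* encode a 4-bit field value */

	/* decoding recovers the original value: 0xa */
	printf("encoded %#x, decoded %#x\n", reg, INIT_IPS_X(reg));
	return 0;
}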

@@ -288,6 +288,7 @@ enum
 	LINUX_MIB_TCPTIMEOUTREHASH,		/* TCPTimeoutRehash */
 	LINUX_MIB_TCPDUPLICATEDATAREHASH,	/* TCPDuplicateDataRehash */
 	LINUX_MIB_TCPDSACKRECVSEGS,		/* TCPDSACKRecvSegs */
+	LINUX_MIB_TCPDSACKIGNOREDDUBIOUS,	/* TCPDSACKIgnoredDubious */
 	__LINUX_MIB_MAX
 };


@@ -30,15 +30,15 @@ static struct kobject *btf_kobj;
 static int __init btf_vmlinux_init(void)
 {
-	if (!__start_BTF)
+	bin_attr_btf_vmlinux.size = __stop_BTF - __start_BTF;
+
+	if (!__start_BTF || bin_attr_btf_vmlinux.size == 0)
 		return 0;
 	btf_kobj = kobject_create_and_add("btf", kernel_kobj);
 	if (!btf_kobj)
 		return -ENOMEM;
-	bin_attr_btf_vmlinux.size = __stop_BTF - __start_BTF;
-
 	return sysfs_create_bin_file(btf_kobj, &bin_attr_btf_vmlinux);
 }


@@ -88,9 +88,10 @@ static void br_arp_send(struct net_bridge *br, struct net_bridge_port *p,
 	}
 }
-static int br_chk_addr_ip(struct net_device *dev, void *data)
+static int br_chk_addr_ip(struct net_device *dev,
+			  struct netdev_nested_priv *priv)
 {
-	__be32 ip = *(__be32 *)data;
+	__be32 ip = *(__be32 *)priv->data;
 	struct in_device *in_dev;
 	__be32 addr = 0;
@@ -107,11 +108,15 @@ static int br_chk_addr_ip(struct net_device *dev, void *data)
 static bool br_is_local_ip(struct net_device *dev, __be32 ip)
 {
-	if (br_chk_addr_ip(dev, &ip))
+	struct netdev_nested_priv priv = {
+		.data = (void *)&ip,
+	};
+
+	if (br_chk_addr_ip(dev, &priv))
 		return true;
 	/* check if ip is configured on upper dev */
-	if (netdev_walk_all_upper_dev_rcu(dev, br_chk_addr_ip, &ip))
+	if (netdev_walk_all_upper_dev_rcu(dev, br_chk_addr_ip, &priv))
 		return true;
 	return false;
@@ -361,9 +366,10 @@ static void br_nd_send(struct net_bridge *br, struct net_bridge_port *p,
 	}
 }
-static int br_chk_addr_ip6(struct net_device *dev, void *data)
+static int br_chk_addr_ip6(struct net_device *dev,
+			   struct netdev_nested_priv *priv)
 {
-	struct in6_addr *addr = (struct in6_addr *)data;
+	struct in6_addr *addr = (struct in6_addr *)priv->data;
 	if (ipv6_chk_addr(dev_net(dev), addr, dev, 0))
 		return 1;
@@ -374,11 +380,15 @@ static int br_chk_addr_ip6(struct net_device *dev, void *data)
 static bool br_is_local_ip6(struct net_device *dev, struct in6_addr *addr)
 {
-	if (br_chk_addr_ip6(dev, addr))
+	struct netdev_nested_priv priv = {
+		.data = (void *)addr,
+	};
+
+	if (br_chk_addr_ip6(dev, &priv))
 		return true;
 	/* check if ip is configured on upper dev */
-	if (netdev_walk_all_upper_dev_rcu(dev, br_chk_addr_ip6, addr))
+	if (netdev_walk_all_upper_dev_rcu(dev, br_chk_addr_ip6, &priv))
 		return true;
 	return false;


@@ -413,6 +413,8 @@ void br_fdb_delete_by_port(struct net_bridge *br,
 		if (!do_all)
 			if (test_bit(BR_FDB_STATIC, &f->flags) ||
+			    (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags) &&
+			     !test_bit(BR_FDB_OFFLOADED, &f->flags)) ||
 			    (vid && f->key.vlan_id != vid))
 				continue;


@@ -1360,7 +1360,7 @@ static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
 }
 static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
-				       __always_unused void *data)
+				       __always_unused struct netdev_nested_priv *priv)
 {
 	return br_vlan_is_bind_vlan_dev(dev);
 }
@@ -1383,9 +1383,9 @@ struct br_vlan_bind_walk_data {
 };
 static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
-					  void *data_in)
+					  struct netdev_nested_priv *priv)
 {
-	struct br_vlan_bind_walk_data *data = data_in;
+	struct br_vlan_bind_walk_data *data = priv->data;
 	int found = 0;
 	if (br_vlan_is_bind_vlan_dev(dev) &&
@@ -1403,10 +1403,13 @@ br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
 	struct br_vlan_bind_walk_data data = {
 		.vid = vid,
 	};
+	struct netdev_nested_priv priv = {
+		.data = (void *)&data,
+	};
 	rcu_read_lock();
 	netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
-				      &data);
+				      &priv);
 	rcu_read_unlock();
 	return data.result;
@@ -1487,9 +1490,9 @@ struct br_vlan_link_state_walk_data {
 };
 static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
-					void *data_in)
+					struct netdev_nested_priv *priv)
 {
-	struct br_vlan_link_state_walk_data *data = data_in;
+	struct br_vlan_link_state_walk_data *data = priv->data;
 	if (br_vlan_is_bind_vlan_dev(vlan_dev))
 		br_vlan_set_vlan_dev_state(data->br, vlan_dev);
@@ -1503,10 +1506,13 @@ static void br_vlan_link_state_change(struct net_device *dev,
 	struct br_vlan_link_state_walk_data data = {
 		.br = br
 	};
+	struct netdev_nested_priv priv = {
+		.data = (void *)&data,
+	};
 	rcu_read_lock();
 	netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
-				      &data);
+				      &priv);
 	rcu_read_unlock();
 }


@@ -575,7 +575,7 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
 	 * coalescing neighboring slab objects into a single frag which
 	 * triggers one of hardened usercopy checks.
 	 */
-	if (page_count(page) >= 1 && !PageSlab(page))
+	if (sendpage_ok(page))
 		sendpage = sock->ops->sendpage;
 	else
 		sendpage = sock_no_sendpage;


@ -6812,9 +6812,10 @@ static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
return NULL; return NULL;
} }
static int ____netdev_has_upper_dev(struct net_device *upper_dev, void *data) static int ____netdev_has_upper_dev(struct net_device *upper_dev,
struct netdev_nested_priv *priv)
{ {
struct net_device *dev = data; struct net_device *dev = (struct net_device *)priv->data;
return upper_dev == dev; return upper_dev == dev;
} }
@ -6831,10 +6832,14 @@ static int ____netdev_has_upper_dev(struct net_device *upper_dev, void *data)
bool netdev_has_upper_dev(struct net_device *dev, bool netdev_has_upper_dev(struct net_device *dev,
struct net_device *upper_dev) struct net_device *upper_dev)
{ {
struct netdev_nested_priv priv = {
.data = (void *)upper_dev,
};
ASSERT_RTNL(); ASSERT_RTNL();
return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
upper_dev); &priv);
} }
EXPORT_SYMBOL(netdev_has_upper_dev); EXPORT_SYMBOL(netdev_has_upper_dev);
@ -6851,8 +6856,12 @@ EXPORT_SYMBOL(netdev_has_upper_dev);
bool netdev_has_upper_dev_all_rcu(struct net_device *dev, bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
struct net_device *upper_dev) struct net_device *upper_dev)
{ {
struct netdev_nested_priv priv = {
.data = (void *)upper_dev,
};
return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
upper_dev); &priv);
} }
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu); EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
@ -6997,8 +7006,8 @@ static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
static int __netdev_walk_all_upper_dev(struct net_device *dev, static int __netdev_walk_all_upper_dev(struct net_device *dev,
int (*fn)(struct net_device *dev, int (*fn)(struct net_device *dev,
void *data), struct netdev_nested_priv *priv),
void *data) struct netdev_nested_priv *priv)
{ {
struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
@ -7010,7 +7019,7 @@ static int __netdev_walk_all_upper_dev(struct net_device *dev,
while (1) { while (1) {
if (now != dev) { if (now != dev) {
ret = fn(now, data); ret = fn(now, priv);
if (ret) if (ret)
return ret; return ret;
} }
@ -7046,8 +7055,8 @@ static int __netdev_walk_all_upper_dev(struct net_device *dev,
int netdev_walk_all_upper_dev_rcu(struct net_device *dev, int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
int (*fn)(struct net_device *dev, int (*fn)(struct net_device *dev,
void *data), struct netdev_nested_priv *priv),
void *data) struct netdev_nested_priv *priv)
{ {
struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
@ -7058,7 +7067,7 @@ int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
while (1) { while (1) {
if (now != dev) { if (now != dev) {
ret = fn(now, data); ret = fn(now, priv);
if (ret) if (ret)
return ret; return ret;
} }
@ -7094,10 +7103,15 @@ EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
static bool __netdev_has_upper_dev(struct net_device *dev, static bool __netdev_has_upper_dev(struct net_device *dev,
struct net_device *upper_dev) struct net_device *upper_dev)
{ {
struct netdev_nested_priv priv = {
.flags = 0,
.data = (void *)upper_dev,
};
ASSERT_RTNL(); ASSERT_RTNL();
return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev, return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
upper_dev); &priv);
} }
/** /**
@ -7215,8 +7229,8 @@ static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
int netdev_walk_all_lower_dev(struct net_device *dev, int netdev_walk_all_lower_dev(struct net_device *dev,
int (*fn)(struct net_device *dev, int (*fn)(struct net_device *dev,
void *data), struct netdev_nested_priv *priv),
void *data) struct netdev_nested_priv *priv)
{ {
struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
@ -7227,7 +7241,7 @@ int netdev_walk_all_lower_dev(struct net_device *dev,
while (1) { while (1) {
if (now != dev) { if (now != dev) {
ret = fn(now, data); ret = fn(now, priv);
if (ret) if (ret)
return ret; return ret;
} }
@ -7262,8 +7276,8 @@ EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
static int __netdev_walk_all_lower_dev(struct net_device *dev, static int __netdev_walk_all_lower_dev(struct net_device *dev,
int (*fn)(struct net_device *dev, int (*fn)(struct net_device *dev,
void *data), struct netdev_nested_priv *priv),
void *data) struct netdev_nested_priv *priv)
{ {
struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
@ -7275,7 +7289,7 @@ static int __netdev_walk_all_lower_dev(struct net_device *dev,
while (1) { while (1) {
if (now != dev) { if (now != dev) {
ret = fn(now, data); ret = fn(now, priv);
if (ret) if (ret)
return ret; return ret;
} }
@ -7364,22 +7378,34 @@ static u8 __netdev_lower_depth(struct net_device *dev)
return max_depth; return max_depth;
} }
static int __netdev_update_upper_level(struct net_device *dev, void *data) static int __netdev_update_upper_level(struct net_device *dev,
struct netdev_nested_priv *__unused)
{ {
dev->upper_level = __netdev_upper_depth(dev) + 1; dev->upper_level = __netdev_upper_depth(dev) + 1;
return 0; return 0;
} }
static int __netdev_update_lower_level(struct net_device *dev, void *data) static int __netdev_update_lower_level(struct net_device *dev,
struct netdev_nested_priv *priv)
{ {
dev->lower_level = __netdev_lower_depth(dev) + 1; dev->lower_level = __netdev_lower_depth(dev) + 1;
#ifdef CONFIG_LOCKDEP
if (!priv)
return 0;
if (priv->flags & NESTED_SYNC_IMM)
dev->nested_level = dev->lower_level - 1;
if (priv->flags & NESTED_SYNC_TODO)
net_unlink_todo(dev);
#endif
return 0; return 0;
} }
int netdev_walk_all_lower_dev_rcu(struct net_device *dev, int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
int (*fn)(struct net_device *dev, int (*fn)(struct net_device *dev,
void *data), struct netdev_nested_priv *priv),
void *data) struct netdev_nested_priv *priv)
{ {
struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
@ -7390,7 +7416,7 @@ int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
while (1) { while (1) {
if (now != dev) { if (now != dev) {
ret = fn(now, data); ret = fn(now, priv);
if (ret) if (ret)
return ret; return ret;
} }
@ -7650,6 +7676,7 @@ static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
static int __netdev_upper_dev_link(struct net_device *dev, static int __netdev_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev, bool master, struct net_device *upper_dev, bool master,
void *upper_priv, void *upper_info, void *upper_priv, void *upper_info,
struct netdev_nested_priv *priv,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
{ {
struct netdev_notifier_changeupper_info changeupper_info = { struct netdev_notifier_changeupper_info changeupper_info = {
@ -7706,9 +7733,9 @@ static int __netdev_upper_dev_link(struct net_device *dev,
__netdev_update_upper_level(dev, NULL); __netdev_update_upper_level(dev, NULL);
__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
__netdev_update_lower_level(upper_dev, NULL); __netdev_update_lower_level(upper_dev, priv);
__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
NULL); priv);
return 0; return 0;
@ -7733,8 +7760,13 @@ int netdev_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev, struct net_device *upper_dev,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
{ {
struct netdev_nested_priv priv = {
.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
.data = NULL,
};
return __netdev_upper_dev_link(dev, upper_dev, false, return __netdev_upper_dev_link(dev, upper_dev, false,
NULL, NULL, extack); NULL, NULL, &priv, extack);
} }
EXPORT_SYMBOL(netdev_upper_dev_link); EXPORT_SYMBOL(netdev_upper_dev_link);
@ -7757,21 +7789,19 @@ int netdev_master_upper_dev_link(struct net_device *dev,
void *upper_priv, void *upper_info, void *upper_priv, void *upper_info,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
{ {
struct netdev_nested_priv priv = {
.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
.data = NULL,
};
return __netdev_upper_dev_link(dev, upper_dev, true, return __netdev_upper_dev_link(dev, upper_dev, true,
upper_priv, upper_info, extack); upper_priv, upper_info, &priv, extack);
} }
EXPORT_SYMBOL(netdev_master_upper_dev_link); EXPORT_SYMBOL(netdev_master_upper_dev_link);
/** static void __netdev_upper_dev_unlink(struct net_device *dev,
* netdev_upper_dev_unlink - Removes a link to upper device struct net_device *upper_dev,
* @dev: device struct netdev_nested_priv *priv)
* @upper_dev: new upper device
*
* Removes a link to device which is upper to this one. The caller must hold
* the RTNL lock.
*/
void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev)
{ {
struct netdev_notifier_changeupper_info changeupper_info = { struct netdev_notifier_changeupper_info changeupper_info = {
.info = { .info = {
@ -7796,9 +7826,28 @@ void netdev_upper_dev_unlink(struct net_device *dev,
__netdev_update_upper_level(dev, NULL); __netdev_update_upper_level(dev, NULL);
__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
__netdev_update_lower_level(upper_dev, NULL); __netdev_update_lower_level(upper_dev, priv);
__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
NULL); priv);
}
/**
* netdev_upper_dev_unlink - Removes a link to upper device
* @dev: device
* @upper_dev: new upper device
*
* Removes a link to device which is upper to this one. The caller must hold
* the RTNL lock.
*/
void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev)
{
struct netdev_nested_priv priv = {
.flags = NESTED_SYNC_TODO,
.data = NULL,
};
__netdev_upper_dev_unlink(dev, upper_dev, &priv);
} }
EXPORT_SYMBOL(netdev_upper_dev_unlink); EXPORT_SYMBOL(netdev_upper_dev_unlink);
@ -7834,6 +7883,10 @@ int netdev_adjacent_change_prepare(struct net_device *old_dev,
struct net_device *dev, struct net_device *dev,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
{ {
struct netdev_nested_priv priv = {
.flags = 0,
.data = NULL,
};
int err; int err;
if (!new_dev) if (!new_dev)
@ -7841,8 +7894,8 @@ int netdev_adjacent_change_prepare(struct net_device *old_dev,
if (old_dev && new_dev != old_dev) if (old_dev && new_dev != old_dev)
netdev_adjacent_dev_disable(dev, old_dev); netdev_adjacent_dev_disable(dev, old_dev);
err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
err = netdev_upper_dev_link(new_dev, dev, extack); extack);
if (err) { if (err) {
if (old_dev && new_dev != old_dev) if (old_dev && new_dev != old_dev)
netdev_adjacent_dev_enable(dev, old_dev); netdev_adjacent_dev_enable(dev, old_dev);
@@ -7857,6 +7910,11 @@ void netdev_adjacent_change_commit(struct net_device *old_dev,
 				   struct net_device *new_dev,
 				   struct net_device *dev)
 {
+	struct netdev_nested_priv priv = {
+		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
+		.data = NULL,
+	};
+
 	if (!new_dev || !old_dev)
 		return;

@@ -7864,7 +7922,7 @@ void netdev_adjacent_change_commit(struct net_device *old_dev,
 		return;

 	netdev_adjacent_dev_enable(dev, old_dev);
-	netdev_upper_dev_unlink(old_dev, dev);
+	__netdev_upper_dev_unlink(old_dev, dev, &priv);
 }
 EXPORT_SYMBOL(netdev_adjacent_change_commit);
@@ -7872,13 +7930,18 @@ void netdev_adjacent_change_abort(struct net_device *old_dev,
 				  struct net_device *new_dev,
 				  struct net_device *dev)
 {
+	struct netdev_nested_priv priv = {
+		.flags = 0,
+		.data = NULL,
+	};
+
 	if (!new_dev)
 		return;

 	if (old_dev && new_dev != old_dev)
 		netdev_adjacent_dev_enable(dev, old_dev);

-	netdev_upper_dev_unlink(new_dev, dev);
+	__netdev_upper_dev_unlink(new_dev, dev, &priv);
 }
 EXPORT_SYMBOL(netdev_adjacent_change_abort);
@@ -10062,6 +10125,19 @@ static void netdev_wait_allrefs(struct net_device *dev)
 void netdev_run_todo(void)
 {
 	struct list_head list;
+#ifdef CONFIG_LOCKDEP
+	struct list_head unlink_list;
+
+	list_replace_init(&net_unlink_list, &unlink_list);
+
+	while (!list_empty(&unlink_list)) {
+		struct net_device *dev = list_first_entry(&unlink_list,
+							  struct net_device,
+							  unlink_list);
+		list_del_init(&dev->unlink_list);
+		dev->nested_level = dev->lower_level - 1;
+	}
+#endif

 	/* Snapshot list, allow later requests */
 	list_replace_init(&net_todo_list, &list);
@ -10274,6 +10350,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev->gso_max_segs = GSO_MAX_SEGS; dev->gso_max_segs = GSO_MAX_SEGS;
dev->upper_level = 1; dev->upper_level = 1;
dev->lower_level = 1; dev->lower_level = 1;
#ifdef CONFIG_LOCKDEP
dev->nested_level = 0;
INIT_LIST_HEAD(&dev->unlink_list);
#endif
INIT_LIST_HEAD(&dev->napi_list); INIT_LIST_HEAD(&dev->napi_list);
INIT_LIST_HEAD(&dev->unreg_list); INIT_LIST_HEAD(&dev->unreg_list);
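
Both #ifdef CONFIG_LOCKDEP hunks keep the new bookkeeping out of production builds: nested_level exists only to feed lockdep a per-device lock subclass, and unlink_list exists only so unlink operations can queue a device for the fixup loop in netdev_run_todo() above. The companion include/linux/netdevice.h change from this series is not shown in this diff; a sketch of the fields it adds (shape assumed, not a verbatim quote):

struct net_device {
	...
#ifdef CONFIG_LOCKDEP
	/* queued on net_unlink_list, drained under RTNL in netdev_run_todo() */
	struct list_head	unlink_list;
	/* this device's depth in the stack; doubles as the lockdep
	 * subclass for addr_list_lock
	 */
	unsigned char		nested_level;
#endif
	...
};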

diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -637,7 +637,7 @@ int dev_uc_sync(struct net_device *to, struct net_device *from)
 	if (to->addr_len != from->addr_len)
 		return -EINVAL;

-	netif_addr_lock_nested(to);
+	netif_addr_lock(to);
 	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);
@@ -667,7 +667,7 @@ int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
 	if (to->addr_len != from->addr_len)
 		return -EINVAL;

-	netif_addr_lock_nested(to);
+	netif_addr_lock(to);
 	err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);
@@ -700,7 +700,7 @@ void dev_uc_unsync(struct net_device *to, struct net_device *from)
 	 * larger.
 	 */
 	netif_addr_lock_bh(from);
-	netif_addr_lock_nested(to);
+	netif_addr_lock(to);
 	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
 	__dev_set_rx_mode(to);
 	netif_addr_unlock(to);
@@ -867,7 +867,7 @@ int dev_mc_sync(struct net_device *to, struct net_device *from)
 	if (to->addr_len != from->addr_len)
 		return -EINVAL;

-	netif_addr_lock_nested(to);
+	netif_addr_lock(to);
 	err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);
@@ -897,7 +897,7 @@ int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
 	if (to->addr_len != from->addr_len)
 		return -EINVAL;

-	netif_addr_lock_nested(to);
+	netif_addr_lock(to);
 	err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);
@@ -922,7 +922,7 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
 	/* See the above comments inside dev_uc_unsync(). */
 	netif_addr_lock_bh(from);
-	netif_addr_lock_nested(to);
+	netif_addr_lock(to);
 	__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
 	__dev_set_rx_mode(to);
 	netif_addr_unlock(to);
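
The six hunks above all make the same substitution: netif_addr_lock_nested() becomes plain netif_addr_lock(). The _nested variant hard-coded lockdep subclass 1, which can only describe two levels of device stacking; with three or more nested devices (say, a macvlan on a vlan on a bond) lockdep either fired false deadlock warnings or missed real ones on addr_list_lock. Now that dev->nested_level is maintained by the dev.c changes, the plain helper can derive the subclass per device. A sketch of its assumed shape (the real definition lives in include/linux/netdevice.h, updated elsewhere in this series):

static inline void netif_addr_lock(struct net_device *dev)
{
	unsigned char nest_level = 0;

#ifdef CONFIG_LOCKDEP
	nest_level = dev->nested_level;	/* depth of dev in the device stack */
#endif
	spin_lock_nested(&dev->addr_list_lock, nest_level);
}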

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5622,7 +5622,7 @@ int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
 	lse->label_stack_entry = mpls_lse;
 	skb_postpush_rcsum(skb, lse, MPLS_HLEN);

-	if (ethernet)
+	if (ethernet && mac_len >= ETH_HLEN)
 		skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
 	skb->protocol = mpls_proto;

@@ -5662,7 +5662,7 @@ int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
 	skb_reset_mac_header(skb);
 	skb_set_network_header(skb, mac_len);

-	if (ethernet) {
+	if (ethernet && mac_len >= ETH_HLEN) {
 		struct ethhdr *hdr;

 		/* use mpls_hdr() to get ethertype to account for VLANs. */
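
Both MPLS hunks add the same guard. skb_mpls_push() and skb_mpls_pop() used to trust the caller's ethernet flag and unconditionally rewrite the Ethertype at the tail of the MAC header, but openvswitch can strip the L2 header before calling these helpers, leaving mac_len shorter than a full Ethernet header (possibly 0). In that case eth_hdr(skb) does not cover ETH_HLEN valid bytes and the write lands on unrelated data. A minimal model of the fixed precondition (plain C, illustrative only):

#include <stdbool.h>
#include <stddef.h>

#define ETH_HLEN 14	/* 6-byte dst + 6-byte src + 2-byte h_proto */

/* h_proto occupies bytes 12..13 of the MAC header, so rewriting the
 * Ethertype is only safe when mac_len covers the whole header. */
static bool may_rewrite_eth_type(bool ethernet, size_t mac_len)
{
	return ethernet && mac_len >= ETH_HLEN;
}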

diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -866,7 +866,7 @@ static const struct genl_multicast_group ethtool_nl_mcgrps[] = {
 	[ETHNL_MCGRP_MONITOR] = { .name = ETHTOOL_MCGRP_MONITOR_NAME },
 };

-static struct genl_family ethtool_genl_family = {
+static struct genl_family ethtool_genl_family __ro_after_init = {
 	.name		= ETHTOOL_GENL_NAME,
 	.version	= ETHTOOL_GENL_VERSION,
 	.netnsok	= true,
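
Marking ethtool_genl_family as __ro_after_init places it in a section the kernel write-protects once initialization completes. That is safe here because genl_register_family() writes the struct (the family id and multicast group offset) only during init, and every later access is read-only; a stray write afterwards now faults loudly instead of silently corrupting the registration. The idiom in miniature (the names below are illustrative, not from this diff):

static unsigned int max_widgets __ro_after_init;

static int __init widget_init(void)
{
	max_widgets = 16;	/* fine: the section is still writable during init */
	return 0;
}

/* after init the page is sealed; a later "max_widgets = 0;"
 * would trigger a protection fault instead of going unnoticed */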

diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -490,6 +490,7 @@ static struct xfrm_tunnel vti_ipip_handler __read_mostly = {
 	.priority	=	0,
 };

+#if IS_ENABLED(CONFIG_IPV6)
 static struct xfrm_tunnel vti_ipip6_handler __read_mostly = {
 	.handler	=	vti_rcv_tunnel,
 	.cb_handler	=	vti_rcv_cb,
@@ -497,6 +498,7 @@ static struct xfrm_tunnel vti_ipip6_handler __read_mostly = {
 	.priority	=	0,
 };
 #endif
+#endif

 static int __net_init vti_init_net(struct net *net)
 {
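
The stacked #endif is correct: vti_ipip6_handler already sat inside an existing conditional block, and the patch opens a new IS_ENABLED(CONFIG_IPV6) region before the handler plus a matching #endif after the old one, nesting the IPv6-in-IPv4 handler inside both. IS_ENABLED() is the right test because it covers modular IPv6 as well:

#if IS_ENABLED(CONFIG_IPV6)	/* true for CONFIG_IPV6=y and CONFIG_IPV6=m */
/* IPv6-dependent code */
#endif

#ifdef CONFIG_IPV6		/* true only for CONFIG_IPV6=y; a modular
				 * build defines CONFIG_IPV6_MODULE instead,
				 * so this would wrongly compile the code out */
#endif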

diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -293,6 +293,7 @@ static const struct snmp_mib snmp4_net_list[] = {
 	SNMP_MIB_ITEM("TcpTimeoutRehash", LINUX_MIB_TCPTIMEOUTREHASH),
 	SNMP_MIB_ITEM("TcpDuplicateDataRehash", LINUX_MIB_TCPDUPLICATEDATAREHASH),
 	SNMP_MIB_ITEM("TCPDSACKRecvSegs", LINUX_MIB_TCPDSACKRECVSEGS),
+	SNMP_MIB_ITEM("TCPDSACKIgnoredDubious", LINUX_MIB_TCPDSACKIGNOREDDUBIOUS),
 	SNMP_MIB_SENTINEL
 };
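
This hunk only exports the new counter through /proc/net/netstat; the increment happens in the paired tcp_input.c change from this series, which refuses to feed a DSACK into the duplicate-SACK heuristics when its sequence range is implausible (wider than the largest receive window the peer ever advertised). A hedged sketch of that producer path, simplified from tcp_check_dsack():

dup_segs = tcp_dsack_seen(tp, start_seq_0, end_seq_0, state);
if (!dup_segs) {	/* dubious range, e.g. wider than tp->max_window */
	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKIGNOREDDUBIOUS);
	return false;
}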

diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -214,7 +214,7 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 		sock_rps_save_rxhash(child, skb);
 		if (rsk_drop_req(req)) {
-			refcount_set(&req->rsk_refcnt, 2);
+			reqsk_put(req);
 			return child;
 		}
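
A one-line fix with reference-counting weight behind it. In the surrounding code (outside this hunk's context) tcp_get_cookie_sock() takes its sole reference with refcount_set(&req->rsk_refcnt, 1); in the syncookie path the request socket is never hashed, so that reference is the caller's to dispose of. The rsk_drop_req() branch, taken when MPTCP decides the request must not be queued, used to overwrite the count with 2, manufacturing an extra reference that nothing ever released, so every such request socket leaked. reqsk_put() drops the caller's reference instead. The accounting, spelled out (a sketch, assuming the setup just described):

/* refcount timeline in the rsk_drop_req() branch:
 *
 *	refcount_set(&req->rsk_refcnt, 1);	// earlier in the function
 *	...
 * old:	refcount_set(&req->rsk_refcnt, 2);	// 1 -> 2, surplus ref unowned
 *	return child;				// req is never freed: leak
 * new:	reqsk_put(req);				// 1 -> 0, req freed here
 *	return child;
 */
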

Some files were not shown because too many files have changed in this diff Show More