net: Fix high overhead of vlan sub-device teardown.
When a networking device is taken down that has a non-trivial number of VLAN devices configured under it, we eat a full synchronize_net() for every such VLAN device. This is because of the call chain: NETDEV_DOWN notifier --> vlan_device_event() --> dev_change_flags() --> __dev_change_flags() --> __dev_close() --> __dev_close_many() --> dev_deactivate_many() --> synchronize_net() This is kind of ridiculous because we already have infrastructure for batching an operation X across a list of net devices so that we only incur one sync. So make use of that by exporting dev_close_many() and adjusting its interface so that the caller can fully manage the batch list. Use this in vlan_device_event() and all the overhead goes away. Reported-by: Salam Noureddine <noureddine@arista.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
738e6d30d3
commit
99c4a26a15
|
@ -2156,6 +2156,7 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name);
|
||||||
int dev_alloc_name(struct net_device *dev, const char *name);
|
int dev_alloc_name(struct net_device *dev, const char *name);
|
||||||
int dev_open(struct net_device *dev);
|
int dev_open(struct net_device *dev);
|
||||||
int dev_close(struct net_device *dev);
|
int dev_close(struct net_device *dev);
|
||||||
|
int dev_close_many(struct list_head *head, bool unlink);
|
||||||
void dev_disable_lro(struct net_device *dev);
|
void dev_disable_lro(struct net_device *dev);
|
||||||
int dev_loopback_xmit(struct sk_buff *newskb);
|
int dev_loopback_xmit(struct sk_buff *newskb);
|
||||||
int dev_queue_xmit(struct sk_buff *skb);
|
int dev_queue_xmit(struct sk_buff *skb);
|
||||||
|
|
|
@ -413,7 +413,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
|
||||||
vlan_transfer_features(dev, vlandev);
|
vlan_transfer_features(dev, vlandev);
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case NETDEV_DOWN:
|
case NETDEV_DOWN: {
|
||||||
|
struct net_device *tmp;
|
||||||
|
LIST_HEAD(close_list);
|
||||||
|
|
||||||
if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
|
if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
|
||||||
vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
|
vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
|
||||||
|
|
||||||
|
@ -425,11 +428,18 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
|
||||||
|
|
||||||
vlan = vlan_dev_priv(vlandev);
|
vlan = vlan_dev_priv(vlandev);
|
||||||
if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
|
if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
|
||||||
dev_change_flags(vlandev, flgs & ~IFF_UP);
|
list_add(&vlandev->close_list, &close_list);
|
||||||
netif_stacked_transfer_operstate(dev, vlandev);
|
|
||||||
}
|
}
|
||||||
break;
|
|
||||||
|
|
||||||
|
dev_close_many(&close_list, false);
|
||||||
|
|
||||||
|
list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
|
||||||
|
netif_stacked_transfer_operstate(dev, vlandev);
|
||||||
|
list_del_init(&vlandev->close_list);
|
||||||
|
}
|
||||||
|
list_del(&close_list);
|
||||||
|
break;
|
||||||
|
}
|
||||||
case NETDEV_UP:
|
case NETDEV_UP:
|
||||||
/* Put all VLANs for this dev in the up state too. */
|
/* Put all VLANs for this dev in the up state too. */
|
||||||
vlan_group_for_each_dev(grp, i, vlandev) {
|
vlan_group_for_each_dev(grp, i, vlandev) {
|
||||||
|
|
|
@ -1385,7 +1385,7 @@ static int __dev_close(struct net_device *dev)
|
||||||
return retval;
|
return retval;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int dev_close_many(struct list_head *head)
|
int dev_close_many(struct list_head *head, bool unlink)
|
||||||
{
|
{
|
||||||
struct net_device *dev, *tmp;
|
struct net_device *dev, *tmp;
|
||||||
|
|
||||||
|
@ -1399,11 +1399,13 @@ static int dev_close_many(struct list_head *head)
|
||||||
list_for_each_entry_safe(dev, tmp, head, close_list) {
|
list_for_each_entry_safe(dev, tmp, head, close_list) {
|
||||||
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
|
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
|
||||||
call_netdevice_notifiers(NETDEV_DOWN, dev);
|
call_netdevice_notifiers(NETDEV_DOWN, dev);
|
||||||
list_del_init(&dev->close_list);
|
if (unlink)
|
||||||
|
list_del_init(&dev->close_list);
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL(dev_close_many);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* dev_close - shutdown an interface.
|
* dev_close - shutdown an interface.
|
||||||
|
@ -1420,7 +1422,7 @@ int dev_close(struct net_device *dev)
|
||||||
LIST_HEAD(single);
|
LIST_HEAD(single);
|
||||||
|
|
||||||
list_add(&dev->close_list, &single);
|
list_add(&dev->close_list, &single);
|
||||||
dev_close_many(&single);
|
dev_close_many(&single, true);
|
||||||
list_del(&single);
|
list_del(&single);
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -5986,7 +5988,7 @@ static void rollback_registered_many(struct list_head *head)
|
||||||
/* If device is running, close it first. */
|
/* If device is running, close it first. */
|
||||||
list_for_each_entry(dev, head, unreg_list)
|
list_for_each_entry(dev, head, unreg_list)
|
||||||
list_add_tail(&dev->close_list, &close_head);
|
list_add_tail(&dev->close_list, &close_head);
|
||||||
dev_close_many(&close_head);
|
dev_close_many(&close_head, true);
|
||||||
|
|
||||||
list_for_each_entry(dev, head, unreg_list) {
|
list_for_each_entry(dev, head, unreg_list) {
|
||||||
/* And unlink it from device chain. */
|
/* And unlink it from device chain. */
|
||||||
|
|
Loading…
Reference in New Issue