net: remove 'fallback' argument from dev->ndo_select_queue()

After the previous patch, all callers of ndo_select_queue()
pass netdev_pick_tx as the 'fallback' argument. The only
exceptions are nested calls to ndo_select_queue(), which pass
down the 'fallback' available in the current scope - still
netdev_pick_tx.

We can therefore drop the argument and replace each fallback()
invocation with netdev_pick_tx(). This avoids an indirect call
per xmit packet in some scenarios (TCP syn, unconnected UDP,
generic XDP, pktgen) with device drivers implementing such ndo.
It also cleans up the code a bit.

Tested with ixgbe and CONFIG_FCOE=m

With pktgen using queue xmit:
threads		vanilla 	patched
		(kpps)		(kpps)
1		2334		2428
2		4166		4278
4		7895		8100

 v1 -> v2:
 - rebased after helper's name change

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit a350eccee5 (parent b71b5837f8)
Paolo Abeni, 2019-03-20 11:02:06 +01:00, committed by David S. Miller
32 changed files with 57 additions and 97 deletions
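
As a sketch of the conversion pattern applied throughout this patch
(the driver, its functions and the FOO_SPECIAL_TXQ constant are
hypothetical, for illustration only):

	/* before: the stack handed every driver a 'fallback' pointer */
	static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
				    struct net_device *sb_dev,
				    select_queue_fallback_t fallback)
	{
		if (!foo_needs_special_queue(skb))
			return fallback(dev, skb, sb_dev); /* indirect call */
		return FOO_SPECIAL_TXQ;
	}

	/* after: the driver calls the default helper directly */
	static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
				    struct net_device *sb_dev)
	{
		if (!foo_needs_special_queue(skb))
			return netdev_pick_tx(dev, skb, sb_dev); /* direct call */
		return FOO_SPECIAL_TXQ;
	}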


@@ -423,8 +423,7 @@ tx_finish:
 
 static u16 hfi1_vnic_select_queue(struct net_device *netdev,
				   struct sk_buff *skb,
-				   struct net_device *sb_dev,
-				   select_queue_fallback_t fallback)
+				   struct net_device *sb_dev)
 {
	struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
	struct opa_vnic_skb_mdata *mdata;


@@ -95,8 +95,7 @@ static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
 }
 
 static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
-				 struct net_device *sb_dev,
-				 select_queue_fallback_t fallback)
+				 struct net_device *sb_dev)
 {
	struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
	struct opa_vnic_skb_mdata *mdata;
@@ -106,8 +105,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
	mdata = skb_push(skb, sizeof(*mdata));
	mdata->entropy = opa_vnic_calc_entropy(skb);
	mdata->vl = opa_vnic_get_vl(adapter, skb);
-	rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
-					       sb_dev, fallback);
+	rc = adapter->rn_ops->ndo_select_queue(netdev, skb, sb_dev);
	skb_pull(skb, sizeof(*mdata));
	return rc;
 }


@@ -4114,8 +4114,7 @@ static inline int bond_slave_override(struct bonding *bond,
 }
 
 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
-			     struct net_device *sb_dev,
-			     select_queue_fallback_t fallback)
+			     struct net_device *sb_dev)
 {
	/* This helper function exists to help dev_pick_tx get the correct
	 * destination queue.  Using a helper function skips a call to


@@ -2258,8 +2258,7 @@ error_drop_packet:
 }
 
 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    struct net_device *sb_dev,
-			    select_queue_fallback_t fallback)
+			    struct net_device *sb_dev)
 {
	u16 qid;
	/* we suspect that this is good for in--kernel network services that
@@ -2269,7 +2268,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
	if (skb_rx_queue_recorded(skb))
		qid = skb_get_rx_queue(skb);
	else
-		qid = fallback(dev, skb, NULL);
+		qid = netdev_pick_tx(dev, skb, NULL);
 
	return qid;
 }


@@ -2274,8 +2274,7 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
 };
 
 static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
-				    struct net_device *sb_dev,
-				    select_queue_fallback_t fallback)
+				    struct net_device *sb_dev)
 {
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u16 queue = skb_get_queue_mapping(skb);
@@ -2283,7 +2282,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
	unsigned int q, port;
 
	if (!netdev_uses_dsa(dev))
-		return fallback(dev, skb, NULL);
+		return netdev_pick_tx(dev, skb, NULL);
 
	/* DSA tagging layer will have configured the correct queue */
	q = BRCM_TAG_GET_QUEUE(queue);
@@ -2291,7 +2290,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
 
	if (unlikely(!tx_ring))
-		return fallback(dev, skb, NULL);
+		return netdev_pick_tx(dev, skb, NULL);
 
	return tx_ring->index;
 }


@@ -1909,8 +1909,7 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 }
 
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       struct net_device *sb_dev,
-		       select_queue_fallback_t fallback)
+		       struct net_device *sb_dev)
 {
	struct bnx2x *bp = netdev_priv(dev);
 
@@ -1932,7 +1931,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
	}
 
	/* select a non-FCoE queue */
-	return fallback(dev, skb, NULL) %
+	return netdev_pick_tx(dev, skb, NULL) %
	       (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
 }


@@ -498,8 +498,7 @@ int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val);
 
 /* select_queue callback */
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       struct net_device *sb_dev,
-		       select_queue_fallback_t fallback);
+		       struct net_device *sb_dev);
 
 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					 struct bnx2x_fastpath *fp,


@@ -979,8 +979,7 @@ freeout:
 }
 
 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
-			     struct net_device *sb_dev,
-			     select_queue_fallback_t fallback)
+			     struct net_device *sb_dev)
 {
	int txq;
 
@@ -1022,7 +1021,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
		return txq;
	}
 
-	return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
 }
 
 static int closest_timer(const struct sge *s, int time)


@@ -1964,8 +1964,7 @@ static void hns_nic_get_stats64(struct net_device *ndev,
 
 static u16
 hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
-		     struct net_device *sb_dev,
-		     select_queue_fallback_t fallback)
+		     struct net_device *sb_dev)
 {
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct hns_nic_priv *priv = netdev_priv(ndev);
@@ -1975,7 +1974,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
	    is_multicast_ether_addr(eth_hdr->h_dest))
		return 0;
	else
-		return fallback(ndev, skb, NULL);
+		return netdev_pick_tx(ndev, skb, NULL);
 }
 
 static const struct net_device_ops hns_nic_netdev_ops = {


@@ -8483,8 +8483,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 
 #ifdef IXGBE_FCOE
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
-			      struct net_device *sb_dev,
-			      select_queue_fallback_t fallback)
+			      struct net_device *sb_dev)
 {
	struct ixgbe_adapter *adapter;
	struct ixgbe_ring_feature *f;
@@ -8514,7 +8513,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
			break;
		/* fall through */
	default:
-		return fallback(dev, skb, sb_dev);
+		return netdev_pick_tx(dev, skb, sb_dev);
	}
 
	f = &adapter->ring_feature[RING_F_FCOE];


@@ -685,16 +685,15 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
 }
 
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-			 struct net_device *sb_dev,
-			 select_queue_fallback_t fallback)
+			 struct net_device *sb_dev)
 {
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u16 rings_p_up = priv->num_tx_rings_p_up;
 
	if (netdev_get_num_tc(dev))
-		return fallback(dev, skb, NULL);
+		return netdev_pick_tx(dev, skb, NULL);
 
-	return fallback(dev, skb, NULL) % rings_p_up;
+	return netdev_pick_tx(dev, skb, NULL) % rings_p_up;
 }
 
 static void mlx4_bf_copy(void __iomem *dst, const void *src,


@@ -698,8 +698,7 @@ void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-			 struct net_device *sb_dev,
-			 select_queue_fallback_t fallback);
+			 struct net_device *sb_dev);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
				struct mlx4_en_rx_alloc *frame,


@@ -769,8 +769,7 @@ struct mlx5e_profile {
 void mlx5e_build_ptys2ethtool_map(void);
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       struct net_device *sb_dev,
-		       select_queue_fallback_t fallback);
+		       struct net_device *sb_dev);
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			   struct mlx5e_tx_wqe *wqe, u16 pi);


@@ -110,11 +110,10 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
 #endif
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       struct net_device *sb_dev,
-		       select_queue_fallback_t fallback)
+		       struct net_device *sb_dev)
 {
+	int channel_ix = netdev_pick_tx(dev, skb, NULL);
	struct mlx5e_priv *priv = netdev_priv(dev);
-	int channel_ix = fallback(dev, skb, NULL);
	u16 num_channels;
	int up = 0;


@@ -498,8 +498,7 @@ struct qede_reload_args {
 /* Datapath functions definition */
 netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
 u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
-		      struct net_device *sb_dev,
-		      select_queue_fallback_t fallback);
+		      struct net_device *sb_dev);
 netdev_features_t qede_features_check(struct sk_buff *skb,
				       struct net_device *dev,
				       netdev_features_t features);


@@ -1696,8 +1696,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 }
 
 u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
-		      struct net_device *sb_dev,
-		      select_queue_fallback_t fallback)
+		      struct net_device *sb_dev)
 {
	struct qede_dev *edev = netdev_priv(dev);
	int total_txq;
@@ -1705,7 +1704,7 @@ u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
	total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
 
	return QEDE_TSS_COUNT(edev) ?
-		fallback(dev, skb, NULL) % total_txq : 0;
+		netdev_pick_tx(dev, skb, NULL) % total_txq : 0;
 }
 
 /* 8B udp header + 8B base tunnel header + 32B option length */


@@ -1615,8 +1615,7 @@ drop:
 }
 
 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
-			     struct net_device *sb_dev,
-			     select_queue_fallback_t fallback)
+			     struct net_device *sb_dev)
 {
	/* If skb needs TX timestamp, it is handled in network control queue */
	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :


@@ -101,8 +101,7 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb,
 }
 
 static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    struct net_device *sb_dev,
-			    select_queue_fallback_t fallback)
+			    struct net_device *sb_dev)
 {
	struct vnet_port *port = netdev_priv(dev);
 


@@ -234,8 +234,7 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb,
 }
 
 static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
-			     struct net_device *sb_dev,
-			     select_queue_fallback_t fallback)
+			     struct net_device *sb_dev)
 {
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port = __tx_port_find(vp, skb);


@@ -308,7 +308,7 @@ static inline int netvsc_get_tx_queue(struct net_device *ndev,
  * If a valid queue has already been assigned, then use that.
  * Otherwise compute tx queue based on hash and the send table.
  *
- * This is basically similar to default (__netdev_pick_tx) with the added step
+ * This is basically similar to default (netdev_pick_tx) with the added step
  * of using the host send_table when no other queue has been assigned.
  *
  * TODO support XPS - but get_xps_queue not exported
@@ -331,8 +331,7 @@ static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
 }
 
 static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
-			       struct net_device *sb_dev,
-			       select_queue_fallback_t fallback)
+			       struct net_device *sb_dev)
 {
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
@@ -344,10 +343,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
 
		if (vf_ops->ndo_select_queue)
-			txq = vf_ops->ndo_select_queue(vf_netdev, skb,
-						       sb_dev, fallback);
+			txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
		else
-			txq = fallback(vf_netdev, skb, NULL);
+			txq = netdev_pick_tx(vf_netdev, skb, NULL);
 
		/* Record the queue selected by VF so that it can be
		 * used for common case where VF has more queues than


@@ -115,8 +115,7 @@ static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb,
 
 static u16 net_failover_select_queue(struct net_device *dev,
				      struct sk_buff *skb,
-				     struct net_device *sb_dev,
-				     select_queue_fallback_t fallback)
+				     struct net_device *sb_dev)
 {
	struct net_failover_info *nfo_info = netdev_priv(dev);
	struct net_device *primary_dev;
@@ -127,10 +126,9 @@ static u16 net_failover_select_queue(struct net_device *dev,
		const struct net_device_ops *ops = primary_dev->netdev_ops;
 
		if (ops->ndo_select_queue)
-			txq = ops->ndo_select_queue(primary_dev, skb,
-						    sb_dev, fallback);
+			txq = ops->ndo_select_queue(primary_dev, skb, sb_dev);
		else
-			txq = fallback(primary_dev, skb, NULL);
+			txq = netdev_pick_tx(primary_dev, skb, NULL);
 
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;


@@ -1691,8 +1691,7 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
-			     struct net_device *sb_dev,
-			     select_queue_fallback_t fallback)
+			     struct net_device *sb_dev)
 {
	/*
	 * This helper function exists to help dev_pick_tx get the correct


@@ -606,8 +606,7 @@ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 }
 
 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    struct net_device *sb_dev,
-			    select_queue_fallback_t fallback)
+			    struct net_device *sb_dev)
 {
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;


@@ -1282,8 +1282,7 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
 
 static u16
 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
-				struct net_device *sb_dev,
-				select_queue_fallback_t fallback)
+				struct net_device *sb_dev)
 {
	skb->priority = cfg80211_classify8021d(skb, NULL);
	return mwifiex_1d_to_wmm_queue[skb->priority];


@@ -148,8 +148,7 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
 }
 
 static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
-			       struct net_device *sb_dev,
-			       select_queue_fallback_t fallback)
+			       struct net_device *sb_dev)
 {
	struct xenvif *vif = netdev_priv(dev);
	unsigned int size = vif->hash.size;
@@ -162,7 +161,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
		return 0;
 
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
-		return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+		return netdev_pick_tx(dev, skb, NULL) %
+		       dev->real_num_tx_queues;
 
	xenvif_set_skb_hash(vif, skb);


@@ -543,8 +543,7 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
 }
 
 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
-			       struct net_device *sb_dev,
-			       select_queue_fallback_t fallback)
+			       struct net_device *sb_dev)
 {
	unsigned int num_queues = dev->real_num_tx_queues;
	u32 hash;


@@ -245,8 +245,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
 }
 
 static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    struct net_device *sb_dev,
-			    select_queue_fallback_t fallback)
+			    struct net_device *sb_dev)
 {
	struct adapter *padapter = rtw_netdev_priv(dev);
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;


@@ -404,8 +404,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
 }
 
 static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    struct net_device *sb_dev,
-			    select_queue_fallback_t fallback)
+			    struct net_device *sb_dev)
 {
	struct adapter *padapter = rtw_netdev_priv(dev);
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;


@@ -986,8 +986,7 @@ struct devlink;
  *	those the driver believes to be appropriate.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- *                         struct net_device *sb_dev,
- *                         select_queue_fallback_t fallback);
+ *                         struct net_device *sb_dev);
  *	Called to decide which queue to use when device supports multiple
  *	transmit queues.
  *
@@ -1268,8 +1267,7 @@ struct net_device_ops {
						   netdev_features_t features);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
-						    struct net_device *sb_dev,
-						    select_queue_fallback_t fallback);
+						    struct net_device *sb_dev);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
@@ -2641,11 +2639,9 @@ void dev_close_many(struct list_head *head, bool unlink);
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
-		     struct net_device *sb_dev,
-		     select_queue_fallback_t fallback);
+		     struct net_device *sb_dev);
 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
-		       struct net_device *sb_dev,
-		       select_queue_fallback_t fallback);
+		       struct net_device *sb_dev);
 int dev_queue_xmit(struct sk_buff *skb);
 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
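
One side effect worth noting: the exported helpers dev_pick_tx_zero() and
dev_pick_tx_cpu_id() now have exactly the ndo_select_queue() signature, so
a driver that only needs one of these policies could wire the helper in
directly (hypothetical ops table, not part of this patch):

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_start_xmit   = foo_start_xmit,
		/* always transmit on the queue matching the current CPU */
		.ndo_select_queue = dev_pick_tx_cpu_id,
	};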


@@ -3689,16 +3689,14 @@ get_cpus_map:
 }
 
 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
-		     struct net_device *sb_dev,
-		     select_queue_fallback_t fallback)
+		     struct net_device *sb_dev)
 {
	return 0;
 }
 EXPORT_SYMBOL(dev_pick_tx_zero);
 
 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
-		       struct net_device *sb_dev,
-		       select_queue_fallback_t fallback)
+		       struct net_device *sb_dev)
 {
	return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
 }
@@ -3748,8 +3746,7 @@ struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
		const struct net_device_ops *ops = dev->netdev_ops;
 
		if (ops->ndo_select_queue)
-			queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
-							    netdev_pick_tx);
+			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
		else
			queue_index = netdev_pick_tx(dev, skb, sb_dev);


@@ -1133,8 +1133,7 @@ static void ieee80211_uninit(struct net_device *dev)
 
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
					  struct sk_buff *skb,
-					 struct net_device *sb_dev,
-					 select_queue_fallback_t fallback)
+					 struct net_device *sb_dev)
 {
	return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }
@@ -1179,8 +1178,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
 
 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
					   struct sk_buff *skb,
-					  struct net_device *sb_dev,
-					  select_queue_fallback_t fallback)
+					  struct net_device *sb_dev)
 {
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;


@@ -287,8 +287,7 @@ static u16 packet_pick_tx_queue(struct sk_buff *skb)
 #endif
	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
	if (ops->ndo_select_queue) {
-		queue_index = ops->ndo_select_queue(dev, skb, NULL,
-						    netdev_pick_tx);
+		queue_index = ops->ndo_select_queue(dev, skb, NULL);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = netdev_pick_tx(dev, skb, NULL);