forcedeth: remove redundant assignments in xmit

In the xmit path, prev_tx and prev_tx_ctx are reassigned on every
iteration of the descriptor setup loops, but only the values from the
final iteration are used afterwards. It is enough to set these
variables once, after the loops. In long-running tests, throughput
is better than before.
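
Reduced to a minimal sketch, the transformation looks as follows
(struct entry, ring, RING_SIZE and the fill_* helpers below are
hypothetical stand-ins for the forcedeth descriptor ring, not the
driver's own names):

	#define RING_SIZE 16			/* hypothetical ring length */

	struct entry { unsigned int flaglen; };
	static struct entry ring[RING_SIZE];	/* hypothetical ring storage */

	/* Before: prev is reassigned on every iteration, although only
	 * the value left by the final iteration is used afterwards. */
	static struct entry *fill_before(struct entry *put, int n)
	{
		struct entry *prev = NULL;

		do {
			prev = put;		/* redundant per-iteration work */
			if (++put == &ring[RING_SIZE])
				put = &ring[0];	/* wrap to ring start */
		} while (--n);

		return prev;			/* last entry written */
	}

	/* After: compute prev once, after the loop.  put now points one
	 * slot past the last entry written, so step back one slot,
	 * wrapping from the ring's first entry back to its last. */
	static struct entry *fill_after(struct entry *put, int n)
	{
		do {
			if (++put == &ring[RING_SIZE])
				put = &ring[0];
		} while (--n);

		return (put == &ring[0]) ? &ring[RING_SIZE - 1] : put - 1;
	}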

CC: Srinivas Eeda <srinivas.eeda@oracle.com>
CC: Joe Jin <joe.jin@oracle.com>
CC: Junxiao Bi <junxiao.bi@oracle.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 0d728b844c (parent 6afce19623)
Author: Zhu Yanjun, 2017-11-10 21:10:00 -05:00; committed by David S. Miller
1 file changed, 20 insertions(+), 8 deletions(-)

@@ -2226,8 +2226,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* setup the header buffer */
 	do {
-		prev_tx = put_tx;
-		prev_tx_ctx = np->put_tx_ctx;
 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
 		np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
 						     skb->data + offset, bcnt,
@@ -2262,8 +2260,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		offset = 0;
 
 		do {
-			prev_tx = put_tx;
-			prev_tx_ctx = np->put_tx_ctx;
 			if (!start_tx_ctx)
 				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
 
@@ -2304,6 +2300,16 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		} while (frag_size);
 	}
 
+	if (unlikely(put_tx == np->first_tx.orig))
+		prev_tx = np->last_tx.orig;
+	else
+		prev_tx = put_tx - 1;
+
+	if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+		prev_tx_ctx = np->last_tx_ctx;
+	else
+		prev_tx_ctx = np->put_tx_ctx - 1;
+
 	/* set last fragment flag */
 	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
 
@@ -2377,8 +2383,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 
 	/* setup the header buffer */
 	do {
-		prev_tx = put_tx;
-		prev_tx_ctx = np->put_tx_ctx;
 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
 		np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
 						     skb->data + offset, bcnt,
@@ -2414,8 +2418,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		offset = 0;
 
 		do {
-			prev_tx = put_tx;
-			prev_tx_ctx = np->put_tx_ctx;
 			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
 			if (!start_tx_ctx)
 				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
@@ -2456,6 +2458,16 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		} while (frag_size);
 	}
 
+	if (unlikely(put_tx == np->first_tx.ex))
+		prev_tx = np->last_tx.ex;
+	else
+		prev_tx = put_tx - 1;
+
+	if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+		prev_tx_ctx = np->last_tx_ctx;
+	else
+		prev_tx_ctx = np->put_tx_ctx - 1;
+
 	/* set last fragment flag */
 	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
 
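
The wrap tests in the added blocks are what make the single post-loop
assignment safe: when the loops finish, put_tx and np->put_tx_ctx point
one entry past the last descriptor written, so the previous entry is
normally put_tx - 1 (np->put_tx_ctx - 1); only when the loop ended
exactly on the ring boundary, leaving put_tx back at first_tx, does the
previous entry live at last_tx instead.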