forcedeth: tx timeout fix
This patch fixes tx_timeout() to properly handle cleanup of the tx ring. It also sets the tx put pointer back to the correct position so that software stays in sync with the hardware.

Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 8f955d7f04
parent c759a6b4e1
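For orientation before the diff: the recovery sequence that nv_tx_timeout() follows after this patch can be summarized as below. This is a condensed paraphrase assembled from the hunks that follow, not a verbatim excerpt; it omits the interrupt-status dump and the locking that surround it in the real function.

	/* sketch: tx-timeout recovery flow after this patch (paraphrased from the diff) */
	struct fe_priv *np = netdev_priv(dev);
	union ring_type put_tx;
	int saved_tx_limit;

	nv_stop_tx(dev);			/* 1) stop the tx engine */

	saved_tx_limit = np->tx_limit;		/* 2) reap completed packets, but */
	np->tx_limit = 0;			/*    hand HW no limited pkts and */
	np->tx_stop = 0;			/*    do not wake the tx queue    */
	if (!nv_optimized(np))
		nv_tx_done(dev, np->tx_ring_size);
	else
		nv_tx_done_optimized(dev, np->tx_ring_size);

	if (np->tx_change_owner)		/* remember where HW currently is */
		put_tx.ex = np->tx_change_owner->first_tx_desc;
	else
		put_tx = np->put_tx;

	nv_drain_tx(dev);			/* 3) clear all software tx state */
	nv_init_tx(dev);

	np->get_tx = np->put_tx = put_tx;	/* 4) resync ring pointers to HW  */
	np->tx_limit = saved_tx_limit;

	nv_start_tx(dev);			/* 5) restart tx, wake the queue  */
	netif_wake_queue(dev);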
@@ -1880,6 +1880,7 @@ static void nv_init_tx(struct net_device *dev)
 	np->tx_pkts_in_progress = 0;
 	np->tx_change_owner = NULL;
 	np->tx_end_flip = NULL;
+	np->tx_stop = 0;
 
 	for (i = 0; i < np->tx_ring_size; i++) {
 		if (!nv_optimized(np)) {
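The hunk above clears np->tx_stop whenever nv_init_tx() rebuilds the ring, which matters because the timeout path below now reinitializes the ring while deliberately keeping the queue asleep. For readers unfamiliar with the flag, the pattern it participates in looks roughly like the sketch below; this is a paraphrase of the usual stop/wake dance, not verbatim forcedeth.c, and empty_slots, needed_slots and ring_has_room() are placeholder names.

	/* transmit path, roughly: stop the queue when the ring is full */
	if (unlikely(empty_slots <= needed_slots)) {
		netif_stop_queue(dev);
		np->tx_stop = 1;		/* remember that we stopped it */
		return NETDEV_TX_BUSY;
	}

	/* completion path, roughly: wake the queue once space is available */
	if (np->tx_stop && ring_has_room(np)) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}

With the flag reset inside nv_init_tx(), a freshly reinitialized (empty) ring can no longer be left paired with a stale tx_stop = 1 from before the reset.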
@@ -2530,6 +2531,8 @@ static void nv_tx_timeout(struct net_device *dev)
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 	u32 status;
+	union ring_type put_tx;
+	int saved_tx_limit;
 
 	if (np->msi_flags & NV_MSI_X_ENABLED)
 		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
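The two new locals tie into existing driver state: np->tx_limit gates the "limited packet" handling (see the diff's own comment below, "prevent giving HW any limited pkts"), and ring_type is the union through which forcedeth addresses either of its two descriptor layouts. As a reminder, its layout is approximately the following (exact declaration may differ slightly by kernel version):

	union ring_type {
		struct ring_desc *orig;		/* legacy descriptor format   */
		struct ring_desc_ex *ex;	/* extended descriptor format */
	};

put_tx therefore carries a descriptor pointer in whichever format the NIC uses; the .ex member is the one assigned explicitly in the third hunk because tx_change_owner->first_tx_desc is an extended-format descriptor pointer.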
@@ -2589,24 +2592,32 @@ static void nv_tx_timeout(struct net_device *dev)
 	/* 1) stop tx engine */
 	nv_stop_tx(dev);
 
-	/* 2) check that the packets were not sent already: */
+	/* 2) complete any outstanding tx and do not give HW any limited tx pkts */
+	saved_tx_limit = np->tx_limit;
+	np->tx_limit = 0; /* prevent giving HW any limited pkts */
+	np->tx_stop = 0;  /* prevent waking tx queue */
 	if (!nv_optimized(np))
 		nv_tx_done(dev, np->tx_ring_size);
 	else
 		nv_tx_done_optimized(dev, np->tx_ring_size);
 
-	/* 3) if there are dead entries: clear everything */
-	if (np->get_tx_ctx != np->put_tx_ctx) {
-		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
-		nv_drain_tx(dev);
-		nv_init_tx(dev);
-		setup_hw_rings(dev, NV_SETUP_TX_RING);
-	}
+	/* save current HW position */
+	if (np->tx_change_owner)
+		put_tx.ex = np->tx_change_owner->first_tx_desc;
+	else
+		put_tx = np->put_tx;
 
-	netif_wake_queue(dev);
+	/* 3) clear all tx state */
+	nv_drain_tx(dev);
+	nv_init_tx(dev);
+
+	/* 4) restore state to current HW position */
+	np->get_tx = np->put_tx = put_tx;
+	np->tx_limit = saved_tx_limit;
 
-	/* 4) restart tx engine */
+	/* 5) restart tx engine */
 	nv_start_tx(dev);
+	netif_wake_queue(dev);
 	spin_unlock_irq(&np->lock);
 }
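For completeness, nv_tx_timeout() is not called by the driver itself: it is the netdev watchdog hook that the networking core invokes when the tx queue has been stopped for longer than the watchdog timeout. The wiring looks roughly like the fragments below (a sketch; depending on kernel version it is either the legacy net_device field or the net_device_ops member, and NV_WATCHDOG_TIMEO is assumed to be the driver's usual 5*HZ-style constant):

	/* older kernels: legacy net_device hooks */
	dev->tx_timeout = nv_tx_timeout;
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	/* newer kernels: net_device_ops member */
	.ndo_tx_timeout = nv_tx_timeout,

As the trailing spin_unlock_irq() in the hunk shows, the whole recovery sequence runs with np->lock held, so it must leave the software view of the ring consistent with the hardware before nv_start_tx() re-enables transmission.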