stmmac: protect tx process with lock (V4)

This patch fixes a problem seen on the Orly ARM SMP platform where,
in case of fragmented frames, the descriptors in the TX ring ended
up corrupted. This was due to missing lock protection in the tx
process.

Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Tested-by: Srinivas Kandagatla <srinivas.kandagatla@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Giuseppe Cavallaro (2011-10-18 00:01:19 +00:00)
Committer: David S. Miller
Commit:    a9097a9666
Parent:    79ee1dc32b

2 changed files, 9 insertions(+), 0 deletions(-)
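
For context, a minimal user-space sketch (not part of the patch) of the
serialisation the patch introduces: the transmit path and the tx-completion
path both update the same ring indices (cur_tx, dirty_tx), so they must take
the same lock, as stmmac_xmit() and stmmac_tx() now do with priv->tx_lock.
The fake_priv structure, xmit_thread() and clean_thread() below are
hypothetical stand-ins, and a pthread mutex models the kernel spinlock.

/*
 * Hypothetical user-space model of the locking pattern added by this
 * patch. Build with: gcc -pthread.
 */
#include <pthread.h>
#include <stdio.h>

#define TX_RING_SIZE 64

struct fake_priv {                      /* stand-in for struct stmmac_priv  */
	unsigned int cur_tx;            /* next entry the xmit path fills   */
	unsigned int dirty_tx;          /* next entry the clean path frees  */
	int ring[TX_RING_SIZE];         /* stand-in for the descriptor ring */
	pthread_mutex_t tx_lock;        /* models spin_lock(&priv->tx_lock) */
};

/* Model of the transmit path: reserve and fill ring entries under the lock. */
static void *xmit_thread(void *arg)
{
	struct fake_priv *priv = arg;

	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&priv->tx_lock);
		if (priv->cur_tx - priv->dirty_tx < TX_RING_SIZE) {
			priv->ring[priv->cur_tx % TX_RING_SIZE] = 1;
			priv->cur_tx++;
		}
		pthread_mutex_unlock(&priv->tx_lock);
	}
	return NULL;
}

/* Model of the tx-completion path: release entries under the same lock. */
static void *clean_thread(void *arg)
{
	struct fake_priv *priv = arg;

	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&priv->tx_lock);
		while (priv->dirty_tx != priv->cur_tx) {
			priv->ring[priv->dirty_tx % TX_RING_SIZE] = 0;
			priv->dirty_tx++;
		}
		pthread_mutex_unlock(&priv->tx_lock);
	}
	return NULL;
}

int main(void)
{
	struct fake_priv priv = { .tx_lock = PTHREAD_MUTEX_INITIALIZER };
	pthread_t xmit, clean;

	pthread_create(&xmit, NULL, xmit_thread, &priv);
	pthread_create(&clean, NULL, clean_thread, &priv);
	pthread_join(xmit, NULL);
	pthread_join(clean, NULL);

	/* With the lock, the two index updates never interleave. */
	printf("cur_tx=%u dirty_tx=%u\n", priv.cur_tx, priv.dirty_tx);
	return 0;
}

Without tx_lock, the two threads above can interleave their ring and index
updates, which is the analogue of the corrupted descriptors observed with
fragmented frames on the SMP platform.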

stmmac.h

@@ -70,6 +70,7 @@ struct stmmac_priv {
 	u32 msg_enable;
 	spinlock_t lock;
+	spinlock_t tx_lock;
 	int wolopts;
 	int wolenabled;
 	int wol_irq;

stmmac_main.c

@@ -588,6 +588,8 @@ static void stmmac_tx(struct stmmac_priv *priv)
 {
 	unsigned int txsize = priv->dma_tx_size;
 
+	spin_lock(&priv->tx_lock);
+
 	while (priv->dirty_tx != priv->cur_tx) {
 		int last;
 		unsigned int entry = priv->dirty_tx % txsize;
@@ -651,6 +653,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
 		}
 		netif_tx_unlock(priv->dev);
 	}
+	spin_unlock(&priv->tx_lock);
 }
 
 static inline void stmmac_enable_irq(struct stmmac_priv *priv)
@@ -1078,6 +1081,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
+	spin_lock(&priv->tx_lock);
+
 	entry = priv->cur_tx % txsize;
 
 #ifdef STMMAC_XMIT_DEBUG
@@ -1166,6 +1171,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	priv->hw->dma->enable_dma_transmission(priv->ioaddr);
 
+	spin_unlock(&priv->tx_lock);
+
 	return NETDEV_TX_OK;
 }
@@ -1731,6 +1738,7 @@ static int stmmac_probe(struct net_device *dev)
 			   "please, use ifconfig or nwhwconfig!\n");
 
 	spin_lock_init(&priv->lock);
+	spin_lock_init(&priv->tx_lock);
 
 	ret = register_netdev(dev);
 	if (ret) {