Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next-2.6

David S. Miller 2011-05-17 14:17:05 -04:00
commit c8144a3682
5 changed files with 29 additions and 21 deletions

drivers/net/sfc/efx.c

@@ -798,11 +798,6 @@ void efx_link_status_changed(struct efx_nic *efx)
 	if (!netif_running(efx->net_dev))
 		return;
 
-	if (efx->port_inhibited) {
-		netif_carrier_off(efx->net_dev);
-		return;
-	}
-
 	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
 		efx->n_link_state_changes++;
@@ -1319,8 +1314,20 @@ static void efx_remove_interrupts(struct efx_nic *efx)
 static void efx_set_channels(struct efx_nic *efx)
 {
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+
 	efx->tx_channel_offset =
 		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
+
+	/* We need to adjust the TX queue numbers if we have separate
+	 * RX-only and TX-only channels.
+	 */
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			tx_queue->queue -= (efx->tx_channel_offset *
+					    EFX_TXQ_TYPES);
+	}
 }
 
 static int efx_probe_nic(struct efx_nic *efx)
@@ -1438,7 +1445,7 @@ static void efx_start_all(struct efx_nic *efx)
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
 
-	if (efx_dev_registered(efx) && !efx->port_inhibited)
+	if (efx_dev_registered(efx) && netif_device_present(efx->net_dev))
 		netif_tx_wake_all_queues(efx->net_dev);
 
 	efx_for_each_channel(channel, efx)
@@ -2102,6 +2109,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
 	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
 		   RESET_TYPE(method));
 
+	netif_device_detach(efx->net_dev);
 	efx_reset_down(efx, method);
 
 	rc = efx->type->reset(efx, method);
@@ -2135,6 +2143,7 @@ out:
 		efx->state = STATE_DISABLED;
 	} else {
 		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
+		netif_device_attach(efx->net_dev);
 	}
 	return rc;
 }

drivers/net/sfc/ethtool.c

@@ -955,8 +955,9 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
 	if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
 		return efx_filter_remove_filter(efx, &filter);
-	else
-		return efx_filter_insert_filter(efx, &filter, true);
+
+	rc = efx_filter_insert_filter(efx, &filter, true);
+	return rc < 0 ? rc : 0;
 }
 
 static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,

drivers/net/sfc/net_driver.h

@@ -670,13 +670,12 @@ struct efx_filter_state;
  * @mtd_list: List of MTDs attached to the NIC
  * @nic_data: Hardware dependent state
  * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
- *	@port_inhibited, efx_monitor() and efx_reconfigure_port()
+ *	efx_monitor() and efx_reconfigure_port()
  * @port_enabled: Port enabled indicator.
  *	Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
  *	efx_mac_work() with kernel interfaces. Safe to read under any
  *	one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
  *	be held to modify it.
- * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock
  * @port_initialized: Port initialized?
  * @net_dev: Operating system network device. Consider holding the rtnl lock
  * @stats_buffer: DMA buffer for statistics
@@ -764,7 +763,6 @@ struct efx_nic {
 	struct mutex mac_lock;
 	struct work_struct mac_work;
 	bool port_enabled;
-	bool port_inhibited;
 	bool port_initialized;
 	struct net_device *net_dev;

drivers/net/sfc/selftest.c

@@ -695,12 +695,12 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 	/* Offline (i.e. disruptive) testing
 	 * This checks MAC and PHY loopback on the specified port. */
 
-	/* force the carrier state off so the kernel doesn't transmit during
-	 * the loopback test, and the watchdog timeout doesn't fire. Also put
-	 * falcon into loopback for the register test.
+	/* Detach the device so the kernel doesn't transmit during the
+	 * loopback test and the watchdog timeout doesn't fire.
 	 */
+	netif_device_detach(efx->net_dev);
+
 	mutex_lock(&efx->mac_lock);
-	efx->port_inhibited = true;
 	if (efx->loopback_modes) {
 		/* We need the 312 clock from the PHY to test the XMAC
 		 * registers, so move into XGMII loopback if available */
@@ -750,12 +750,11 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 	/* restore the PHY to the previous state */
 	mutex_lock(&efx->mac_lock);
 	efx->phy_mode = phy_mode;
-	efx->port_inhibited = false;
 	efx->loopback_mode = loopback_mode;
 	__efx_reconfigure_port(efx);
 	mutex_unlock(&efx->mac_lock);
 
-	netif_tx_wake_all_queues(efx->net_dev);
+	netif_device_attach(efx->net_dev);
 
 	return rc_test;
 }

drivers/net/sfc/tx.c

@@ -205,7 +205,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 				goto unwind;
 			}
 			smp_mb();
-			netif_tx_start_queue(tx_queue->core_txq);
+			if (likely(!efx->loopback_selftest))
+				netif_tx_start_queue(
+					tx_queue->core_txq);
 		}
 
 		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
@@ -338,8 +340,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 	struct efx_tx_queue *tx_queue;
 	unsigned index, type;
 
-	if (unlikely(efx->port_inhibited))
-		return NETDEV_TX_BUSY;
+	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
 
 	index = skb_get_queue_mapping(skb);
 	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
@@ -436,7 +437,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	smp_mb();
 	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
 	    likely(efx->port_enabled) &&
-	    likely(!efx->port_inhibited)) {
+	    likely(netif_device_present(efx->net_dev))) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
 		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));