remove NETIF_F_TSO ifdefery

Remove the NETIF_F_TSO #ifdef-ery in drivers/net; this was
for old-old-2.4 compat (even current 2.4 has NETIF_F_TSO),
but it's long past time to get rid of it.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Arjan van de Ven 2006-12-12 14:06:23 +01:00 committed by Jeff Garzik
parent 6d24998f07
commit 1d39ed565c
11 changed files with 3 additions and 115 deletions
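
The change is mechanical and the same in every driver touched below: a
compile-time guard around TSO support is deleted and the guarded code is kept
unconditionally, because NETIF_F_TSO is always provided by <linux/netdevice.h>
on the kernels these drivers still build against. A minimal sketch of the
pattern, using a hypothetical helper that is not taken from any of the drivers:

	#include <linux/netdevice.h>

	/* Hypothetical probe-time helper, for illustration only. */
	static void example_enable_tso_old(struct net_device *dev)
	{
	#ifdef NETIF_F_TSO		/* old style: guard for ancient 2.4 headers */
		dev->features |= NETIF_F_TSO;
	#endif
	}

	static void example_enable_tso_new(struct net_device *dev)
	{
		dev->features |= NETIF_F_TSO;	/* new style: set unconditionally */
	}

The hunks below repeat that deletion in bnx2, e1000, forcedeth, ixgb, myri10ge,
s2io and tg3 (which routed the test through its own TG3_TSO_SUPPORT macro).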

drivers/net/bnx2.c

@@ -39,12 +39,10 @@
 #include <linux/if_vlan.h>
 #define BCM_VLAN 1
 #endif
-#ifdef NETIF_F_TSO
 #include <net/ip.h>
 #include <net/tcp.h>
 #include <net/checksum.h>
 #define BCM_TSO 1
-#endif
 #include <linux/workqueue.h>
 #include <linux/crc32.h>
 #include <linux/prefetch.h>
@@ -1728,7 +1726,7 @@ bnx2_tx_int(struct bnx2 *bp)
 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
 skb = tx_buf->skb;
-#ifdef BCM_TSO
+
 /* partial BD completions possible with TSO packets */
 if (skb_is_gso(skb)) {
 u16 last_idx, last_ring_idx;
@@ -1744,7 +1742,7 @@ bnx2_tx_int(struct bnx2 *bp)
 break;
 }
 }
-#endif
+
 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
 skb_headlen(skb), PCI_DMA_TODEVICE);
@@ -4514,7 +4512,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 vlan_tag_flags |=
 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
 }
-#ifdef BCM_TSO
 if ((mss = skb_shinfo(skb)->gso_size) &&
 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
 u32 tcp_opt_len, ip_tcp_len;
@@ -4547,7 +4544,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 }
 else
-#endif
 {
 mss = 0;
 }
@@ -5544,10 +5540,8 @@ static const struct ethtool_ops bnx2_ethtool_ops = {
 .set_tx_csum = ethtool_op_set_tx_csum,
 .get_sg = ethtool_op_get_sg,
 .set_sg = ethtool_op_set_sg,
-#ifdef BCM_TSO
 .get_tso = ethtool_op_get_tso,
 .set_tso = bnx2_set_tso,
-#endif
 .self_test_count = bnx2_self_test_count,
 .self_test = bnx2_self_test,
 .get_strings = bnx2_get_strings,
@@ -6104,9 +6098,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 #ifdef BCM_VLAN
 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 #endif
-#ifdef BCM_TSO
 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
-#endif
 netif_carrier_off(bp->dev);

drivers/net/e1000/e1000.h

@@ -59,17 +59,13 @@
 #include <linux/capability.h>
 #include <linux/in.h>
 #include <linux/ip.h>
-#ifdef NETIF_F_TSO6
 #include <linux/ipv6.h>
-#endif
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <net/pkt_sched.h>
 #include <linux/list.h>
 #include <linux/reboot.h>
-#ifdef NETIF_F_TSO
 #include <net/checksum.h>
-#endif
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
@@ -347,9 +343,7 @@ struct e1000_adapter {
 boolean_t have_msi;
 #endif
 /* to not mess up cache alignment, always add to the bottom */
-#ifdef NETIF_F_TSO
 boolean_t tso_force;
-#endif
 boolean_t smart_power_down; /* phy smart power down */
 boolean_t quad_port_a;
 unsigned long flags;

drivers/net/e1000/e1000_ethtool.c

@@ -338,7 +338,6 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
 return 0;
 }
-#ifdef NETIF_F_TSO
 static int
 e1000_set_tso(struct net_device *netdev, uint32_t data)
 {
@@ -352,18 +351,15 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
 else
 netdev->features &= ~NETIF_F_TSO;
-#ifdef NETIF_F_TSO6
 if (data)
 netdev->features |= NETIF_F_TSO6;
 else
 netdev->features &= ~NETIF_F_TSO6;
-#endif
 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
 adapter->tso_force = TRUE;
 return 0;
 }
-#endif /* NETIF_F_TSO */
 static uint32_t
 e1000_get_msglevel(struct net_device *netdev)
@@ -1971,10 +1967,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
 .set_tx_csum = e1000_set_tx_csum,
 .get_sg = ethtool_op_get_sg,
 .set_sg = ethtool_op_set_sg,
-#ifdef NETIF_F_TSO
 .get_tso = ethtool_op_get_tso,
 .set_tso = e1000_set_tso,
-#endif
 .self_test_count = e1000_diag_test_count,
 .self_test = e1000_diag_test,
 .get_strings = e1000_get_strings,

drivers/net/e1000/e1000_main.c

@@ -990,16 +990,12 @@ e1000_probe(struct pci_dev *pdev,
 netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
 }
-#ifdef NETIF_F_TSO
 if ((adapter->hw.mac_type >= e1000_82544) &&
 (adapter->hw.mac_type != e1000_82547))
 netdev->features |= NETIF_F_TSO;
-#ifdef NETIF_F_TSO6
 if (adapter->hw.mac_type > e1000_82547_rev_2)
 netdev->features |= NETIF_F_TSO6;
-#endif
-#endif
 if (pci_using_dac)
 netdev->features |= NETIF_F_HIGHDMA;
@@ -2626,7 +2622,6 @@ e1000_watchdog(unsigned long data)
 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
 }
-#ifdef NETIF_F_TSO
 /* disable TSO for pcie and 10/100 speeds, to avoid
 * some hardware issues */
 if (!adapter->tso_force &&
@@ -2637,22 +2632,17 @@ e1000_watchdog(unsigned long data)
 DPRINTK(PROBE,INFO,
 "10/100 speed: disabling TSO\n");
 netdev->features &= ~NETIF_F_TSO;
-#ifdef NETIF_F_TSO6
 netdev->features &= ~NETIF_F_TSO6;
-#endif
 break;
 case SPEED_1000:
 netdev->features |= NETIF_F_TSO;
-#ifdef NETIF_F_TSO6
 netdev->features |= NETIF_F_TSO6;
-#endif
 break;
 default:
 /* oops */
 break;
 }
 }
-#endif
 /* enable transmits in the hardware, need to do this
 * after setting TARC0 */
@@ -2882,7 +2872,6 @@ static int
 e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 struct sk_buff *skb)
 {
-#ifdef NETIF_F_TSO
 struct e1000_context_desc *context_desc;
 struct e1000_buffer *buffer_info;
 unsigned int i;
@@ -2911,7 +2900,6 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 0);
 cmd_length = E1000_TXD_CMD_IP;
 ipcse = skb->h.raw - skb->data - 1;
-#ifdef NETIF_F_TSO6
 } else if (skb->protocol == htons(ETH_P_IPV6)) {
 skb->nh.ipv6h->payload_len = 0;
 skb->h.th->check =
@@ -2921,7 +2909,6 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 IPPROTO_TCP,
 0);
 ipcse = 0;
-#endif
 }
 ipcss = skb->nh.raw - skb->data;
 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
@@ -2954,8 +2941,6 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 return TRUE;
 }
-#endif
 return FALSE;
 }
@@ -3013,7 +2998,6 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 while (len) {
 buffer_info = &tx_ring->buffer_info[i];
 size = min(len, max_per_txd);
-#ifdef NETIF_F_TSO
 /* Workaround for Controller erratum --
 * descriptor for non-tso packet in a linear SKB that follows a
 * tso gets written back prematurely before the data is fully
@@ -3028,7 +3012,6 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 * in TSO mode. Append 4-byte sentinel desc */
 if (unlikely(mss && !nr_frags && size == len && size > 8))
 size -= 4;
-#endif
 /* work-around for errata 10 and it applies
 * to all controllers in PCI-X mode
 * The fix is to make sure that the first descriptor of a
@@ -3070,12 +3053,10 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 while (len) {
 buffer_info = &tx_ring->buffer_info[i];
 size = min(len, max_per_txd);
-#ifdef NETIF_F_TSO
 /* Workaround for premature desc write-backs
 * in TSO mode. Append 4-byte sentinel desc */
 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
 size -= 4;
-#endif
 /* Workaround for potential 82544 hang in PCI-X.
 * Avoid terminating buffers within evenly-aligned
 * dwords. */
@@ -3300,7 +3281,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 if (adapter->hw.mac_type >= e1000_82571)
 max_per_txd = 8192;
-#ifdef NETIF_F_TSO
 mss = skb_shinfo(skb)->gso_size;
 /* The controller does a simple calculation to
 * make sure there is enough room in the FIFO before
@@ -3354,16 +3334,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
 count++;
 count++;
-#else
-if (skb->ip_summed == CHECKSUM_PARTIAL)
-count++;
-#endif
-#ifdef NETIF_F_TSO
 /* Controller Erratum workaround */
 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
 count++;
-#endif
 count += TXD_USE_COUNT(len, max_txd_pwr);

drivers/net/forcedeth.c

@@ -1576,12 +1576,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 np->tx_skbuff[nr] = skb;
-#ifdef NETIF_F_TSO
 if (skb_is_gso(skb))
 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
 else
-#endif
-tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
+	tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
 /* vlan tag */
@@ -4475,9 +4473,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 np->rx_csum = 1;
 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
-#ifdef NETIF_F_TSO
 dev->features |= NETIF_F_TSO;
-#endif
 }
 np->vlanctl_bits = 0;

drivers/net/ixgb/ixgb.h

@@ -61,9 +61,7 @@
 #include <net/pkt_sched.h>
 #include <linux/list.h>
 #include <linux/reboot.h>
-#ifdef NETIF_F_TSO
 #include <net/checksum.h>
-#endif
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>

drivers/net/ixgb/ixgb_ethtool.c

@@ -82,10 +82,8 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
 {"tx_restart_queue", IXGB_STAT(restart_queue) },
 {"rx_long_length_errors", IXGB_STAT(stats.roc)},
 {"rx_short_length_errors", IXGB_STAT(stats.ruc)},
-#ifdef NETIF_F_TSO
 {"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)},
 {"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)},
-#endif
 {"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)},
 {"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)},
 {"tx_flow_control_xon", IXGB_STAT(stats.xontxc)},
@@ -240,7 +238,6 @@ ixgb_set_tx_csum(struct net_device *netdev, uint32_t data)
 return 0;
 }
-#ifdef NETIF_F_TSO
 static int
 ixgb_set_tso(struct net_device *netdev, uint32_t data)
 {
@@ -250,7 +247,6 @@ ixgb_set_tso(struct net_device *netdev, uint32_t data)
 netdev->features &= ~NETIF_F_TSO;
 return 0;
 }
-#endif /* NETIF_F_TSO */
 static uint32_t
 ixgb_get_msglevel(struct net_device *netdev)
@@ -722,10 +718,8 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
 .set_sg = ethtool_op_set_sg,
 .get_msglevel = ixgb_get_msglevel,
 .set_msglevel = ixgb_set_msglevel,
-#ifdef NETIF_F_TSO
 .get_tso = ethtool_op_get_tso,
 .set_tso = ixgb_set_tso,
-#endif
 .get_strings = ixgb_get_strings,
 .phys_id = ixgb_phys_id,
 .get_stats_count = ixgb_get_stats_count,

drivers/net/ixgb/ixgb_main.c

@@ -456,9 +456,7 @@ ixgb_probe(struct pci_dev *pdev,
 NETIF_F_HW_VLAN_TX |
 NETIF_F_HW_VLAN_RX |
 NETIF_F_HW_VLAN_FILTER;
-#ifdef NETIF_F_TSO
 netdev->features |= NETIF_F_TSO;
-#endif
 #ifdef NETIF_F_LLTX
 netdev->features |= NETIF_F_LLTX;
 #endif
@@ -1176,7 +1174,6 @@ ixgb_watchdog(unsigned long data)
 static int
 ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 {
-#ifdef NETIF_F_TSO
 struct ixgb_context_desc *context_desc;
 unsigned int i;
 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
@@ -1233,7 +1230,6 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 return 1;
 }
-#endif
 return 0;
 }

drivers/net/myri10ge/myri10ge.c

@@ -1412,10 +1412,8 @@ static const struct ethtool_ops myri10ge_ethtool_ops = {
 .set_tx_csum = ethtool_op_set_tx_hw_csum,
 .get_sg = ethtool_op_get_sg,
 .set_sg = ethtool_op_set_sg,
-#ifdef NETIF_F_TSO
 .get_tso = ethtool_op_get_tso,
 .set_tso = ethtool_op_set_tso,
-#endif
 .get_strings = myri10ge_get_strings,
 .get_stats_count = myri10ge_get_stats_count,
 .get_ethtool_stats = myri10ge_get_ethtool_stats,
@@ -1975,13 +1973,11 @@ again:
 mss = 0;
 max_segments = MXGEFW_MAX_SEND_DESC;
-#ifdef NETIF_F_TSO
 if (skb->len > (dev->mtu + ETH_HLEN)) {
 mss = skb_shinfo(skb)->gso_size;
 if (mss != 0)
 max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
 }
-#endif /*NETIF_F_TSO */
 if ((unlikely(avail < max_segments))) {
 /* we are out of transmit resources */
@@ -2013,7 +2009,6 @@ again:
 cum_len = 0;
-#ifdef NETIF_F_TSO
 if (mss) { /* TSO */
 /* this removes any CKSUM flag from before */
 flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
@@ -2029,7 +2024,6 @@ again:
 * the checksum by parsing the header. */
 pseudo_hdr_offset = mss;
 } else
-#endif /*NETIF_F_TSO */
 /* Mark small packets, and pad out tiny packets */
 if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
 flags |= MXGEFW_FLAGS_SMALL;
@@ -2097,7 +2091,6 @@ again:
 seglen = len;
 flags_next = flags & ~MXGEFW_FLAGS_FIRST;
 cum_len_next = cum_len + seglen;
-#ifdef NETIF_F_TSO
 if (mss) { /* TSO */
 (req - rdma_count)->rdma_count = rdma_count + 1;
@@ -2124,7 +2117,6 @@ again:
 (small * MXGEFW_FLAGS_SMALL);
 }
 }
-#endif /* NETIF_F_TSO */
 req->addr_high = high_swapped;
 req->addr_low = htonl(low);
 req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
@@ -2161,14 +2153,12 @@ again:
 }
 (req - rdma_count)->rdma_count = rdma_count;
-#ifdef NETIF_F_TSO
 if (mss)
 do {
 req--;
 req->flags |= MXGEFW_FLAGS_TSO_LAST;
 } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
 MXGEFW_FLAGS_FIRST)));
-#endif
 idx = ((count - 1) + tx->req) & tx->mask;
 tx->info[idx].last = 1;
 if (tx->wc_fifo == NULL)

drivers/net/s2io.c

@@ -3887,12 +3887,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 offload_type = s2io_offload_type(skb);
-#ifdef NETIF_F_TSO
 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
 txdp->Control_1 |= TXD_TCP_LSO_EN;
 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
 }
-#endif
 if (skb->ip_summed == CHECKSUM_PARTIAL) {
 txdp->Control_2 |=
 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
@@ -5750,10 +5748,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
 .get_sg = ethtool_op_get_sg,
 .set_sg = ethtool_op_set_sg,
-#ifdef NETIF_F_TSO
 .get_tso = s2io_ethtool_op_get_tso,
 .set_tso = s2io_ethtool_op_set_tso,
-#endif
 .get_ufo = ethtool_op_get_ufo,
 .set_ufo = ethtool_op_set_ufo,
 .self_test_count = s2io_ethtool_self_test_count,
@@ -6978,12 +6974,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
 if (sp->high_dma_flag == TRUE)
 dev->features |= NETIF_F_HIGHDMA;
-#ifdef NETIF_F_TSO
 dev->features |= NETIF_F_TSO;
-#endif
-#ifdef NETIF_F_TSO6
 dev->features |= NETIF_F_TSO6;
-#endif
 if (sp->device_type & XFRAME_II_DEVICE) {
 dev->features |= NETIF_F_UFO;
 dev->features |= NETIF_F_HW_CSUM;

drivers/net/tg3.c

@@ -58,11 +58,7 @@
 #define TG3_VLAN_TAG_USED 0
 #endif
-#ifdef NETIF_F_TSO
 #define TG3_TSO_SUPPORT 1
-#else
-#define TG3_TSO_SUPPORT 0
-#endif
 #include "tg3.h"
@@ -3873,7 +3869,6 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 entry = tp->tx_prod;
 base_flags = 0;
-#if TG3_TSO_SUPPORT != 0
 mss = 0;
 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
 (mss = skb_shinfo(skb)->gso_size) != 0) {
@@ -3906,11 +3901,6 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 else if (skb->ip_summed == CHECKSUM_PARTIAL)
 base_flags |= TXD_FLAG_TCPUDP_CSUM;
-#else
-mss = 0;
-if (skb->ip_summed == CHECKSUM_PARTIAL)
-base_flags |= TXD_FLAG_TCPUDP_CSUM;
-#endif
 #if TG3_VLAN_TAG_USED
 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
 base_flags |= (TXD_FLAG_VLAN |
@@ -3970,7 +3960,6 @@ out_unlock:
 return NETDEV_TX_OK;
 }
-#if TG3_TSO_SUPPORT != 0
 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
 /* Use GSO to workaround a rare TSO bug that may be triggered when the
@@ -4002,7 +3991,6 @@ tg3_tso_bug_end:
 return NETDEV_TX_OK;
 }
-#endif
 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
@@ -4036,7 +4024,6 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 base_flags = 0;
 if (skb->ip_summed == CHECKSUM_PARTIAL)
 base_flags |= TXD_FLAG_TCPUDP_CSUM;
-#if TG3_TSO_SUPPORT != 0
 mss = 0;
 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
 (mss = skb_shinfo(skb)->gso_size) != 0) {
@@ -4091,9 +4078,6 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 }
 }
 }
-#else
-mss = 0;
-#endif
 #if TG3_VLAN_TAG_USED
 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
 base_flags |= (TXD_FLAG_VLAN |
@@ -5329,7 +5313,6 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
 return 0;
 }
-#if TG3_TSO_SUPPORT != 0
 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
 #define TG3_TSO_FW_RELASE_MINOR 0x6
@@ -5906,7 +5889,6 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
 return 0;
 }
-#endif /* TG3_TSO_SUPPORT != 0 */
 /* tp->lock is held. */
 static void __tg3_set_mac_addr(struct tg3 *tp)
@@ -6120,7 +6102,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
 }
-#if TG3_TSO_SUPPORT != 0
 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
 int fw_len;
@@ -6135,7 +6116,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 tw32(BUFMGR_MB_POOL_SIZE,
 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
 }
-#endif
 if (tp->dev->mtu <= ETH_DATA_LEN) {
 tw32(BUFMGR_MB_RDMA_LOW_WATER,
@@ -6337,10 +6317,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
-#if TG3_TSO_SUPPORT != 0
 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 rdmac_mode |= (1 << 27);
-#endif
 /* Receive/send statistics. */
 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
@@ -6511,10 +6489,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
-#if TG3_TSO_SUPPORT != 0
 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
-#endif
 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
@@ -6524,13 +6500,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 return err;
 }
-#if TG3_TSO_SUPPORT != 0
 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
 err = tg3_load_tso_firmware(tp);
 if (err)
 return err;
 }
-#endif
 tp->tx_mode = TX_MODE_ENABLE;
 tw32_f(MAC_TX_MODE, tp->tx_mode);
@@ -8062,7 +8036,6 @@ static void tg3_set_msglevel(struct net_device *dev, u32 value)
 tp->msg_enable = value;
 }
-#if TG3_TSO_SUPPORT != 0
 static int tg3_set_tso(struct net_device *dev, u32 value)
 {
 struct tg3 *tp = netdev_priv(dev);
@@ -8081,7 +8054,6 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
 }
 return ethtool_op_set_tso(dev, value);
 }
-#endif
 static int tg3_nway_reset(struct net_device *dev)
 {
@@ -9212,10 +9184,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
 .set_tx_csum = tg3_set_tx_csum,
 .get_sg = ethtool_op_get_sg,
 .set_sg = ethtool_op_set_sg,
-#if TG3_TSO_SUPPORT != 0
 .get_tso = ethtool_op_get_tso,
 .set_tso = tg3_set_tso,
-#endif
 .self_test_count = tg3_get_test_count,
 .self_test = tg3_self_test,
 .get_strings = tg3_get_strings,
@@ -11856,7 +11826,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 tg3_init_bufmgr_config(tp);
-#if TG3_TSO_SUPPORT != 0
 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
 }
@@ -11881,7 +11850,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 dev->features |= NETIF_F_TSO6;
 }
-#endif
 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&