drivers/net/qla3xxx: trim trailing whitespace

Also, hopefully, change the file permissions to 0644.

Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Author:    Jeff Garzik <jgarzik@redhat.com>
Date:      2007-10-03 13:52:23 -04:00
Committer: David S. Miller
Parent:    07c2c76e27
Commit:    9ddf7774b9

2 changed files with 67 additions and 67 deletions

drivers/net/qla3xxx.c (128 changes; Executable file → Normal file)

@@ -81,7 +81,7 @@ typedef enum {
} PHY_DEVICE_et;
typedef struct {
	PHY_DEVICE_et phyDevice;
	u32 phyIdOUI;
	u16 phyIdModel;
	char *name;
@@ -330,7 +330,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(map);
			if(err) {
				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
					qdev->ndev->name, err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;
@@ -884,14 +884,14 @@ static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
	u16 reg;
	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
		PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
		PHYAddr[qdev->mac_index]);
	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
		PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
		PHYAddr[qdev->mac_index]);
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
@@ -945,7 +945,7 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11, 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
@@ -955,18 +955,18 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}
static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev,
		u16 phyIdReg0, u16 phyIdReg1)
{
	PHY_DEVICE_et result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;
	if (phyIdReg0 == 0xffff) {
		return result;
	}
	if (phyIdReg1 == 0xffff) {
		return result;
	}
@@ -984,7 +984,7 @@ static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev,
			printk(KERN_INFO "%s: Phy: %s\n",
				qdev->ndev->name, PHY_DEVICES[i].name);
			break;
		}
	}
@@ -1033,7 +1033,7 @@ static int ql_is_full_dup(struct ql3_adapter *qdev)
	{
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;
		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	}
	case PHY_VITESSE_VSC8211:
@@ -1082,19 +1082,19 @@ static int PHY_Setup(struct ql3_adapter *qdev)
	/* Check if we have a Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {
		/* Determine which MII address we should be using
		   determined by the index of the card */
		if (qdev->mac_index == 0) {
			miiAddr = MII_AGERE_ADDR_1;
		} else {
			miiAddr = MII_AGERE_ADDR_2;
		}
		err =ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if(err != 0) {
			printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n",
				qdev->ndev->name);
			return err;
		}
		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
@@ -1103,9 +1103,9 @@ static int PHY_Setup(struct ql3_adapter *qdev)
				qdev->ndev->name);
			return err;
		}
		/* We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}
	/* Determine the particular PHY we have on board to apply
@@ -1114,7 +1114,7 @@ static int PHY_Setup(struct ql3_adapter *qdev)
	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name);
		return -EIO;
@@ -1427,7 +1427,7 @@ static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
		PHYAddr[qdev->mac_index]);
}
@@ -1438,7 +1438,7 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
	if(qdev->phyType == PHY_AGERE_ET1011C) {
		/* turn off external loopback */
		ql_mii_write_reg(qdev, 0x13, 0x0000);
	}
	if(qdev->mac_index == 0)
@@ -1452,23 +1452,23 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
		portConfiguration = PORT_CONFIG_DEFAULT;
	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
		PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;
	if(portConfiguration &
		PORT_CONFIG_FULL_DUPLEX_ENABLED &
		PORT_CONFIG_1000MB_SPEED) {
		reg |= PHY_GIG_ADV_1000F;
	}
	if(portConfiguration &
		PORT_CONFIG_HALF_DUPLEX_ENABLED &
		PORT_CONFIG_1000MB_SPEED) {
		reg |= PHY_GIG_ADV_1000H;
	}
	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
		PHYAddr[qdev->mac_index]);
	/* Set the 10/100 & pause negotiation advertisements */
@@ -1482,7 +1482,7 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
	if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if(portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;
		if(portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}
@@ -1490,22 +1490,22 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
	if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if(portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;
		if(portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}
	if(portConfiguration &
		PORT_CONFIG_1000MB_SPEED) {
		reg |= 1;
	}
	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
		PHYAddr[qdev->mac_index]);
	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
	ql_mii_write_reg_ex(qdev, CONTROL_REG,
		reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
		PHYAddr[qdev->mac_index]);
}
@@ -1660,7 +1660,7 @@ static void ql_link_state_machine(struct ql3_adapter *qdev)
			"%s: Reset in progress, skip processing link "
			"state.\n", qdev->ndev->name);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return;
	}
@@ -1752,7 +1752,7 @@ static int ql_mii_setup(struct ql3_adapter *qdev)
		return -1;
	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
			&port_regs->macMIIMgmtControlReg, 0x0f00000);
	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
@@ -1936,7 +1936,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
			err = pci_dma_mapping_error(map);
			if(err) {
				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
					qdev->ndev->name, err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;
@@ -2044,7 +2044,7 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
	if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		printk(KERN_WARNING "Frame short but, frame was padded and sent.\n");
	}
	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
	/* Check the transmit response flags for any errors */
@@ -2108,13 +2108,13 @@ static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion. The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data. For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to the buffer two. Buffer two is the sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of it's data and headers in one buffer for a
 * simpler process. 3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
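
An aside on the comment above: the 3022 prepend step it describes amounts to copying the header bytes from buffer one into the headroom reserved at the top of buffer two, after which buffer two is sent up on its own. A minimal sketch of that idea follows; the function and parameter names are illustrative, not the driver's.

#include <linux/skbuff.h>
#include <linux/string.h>

/*
 * Illustration only: copy the header bytes that arrived in buffer one
 * into the headroom reserved at the top of buffer two, so buffer two
 * can be handed up the stack by itself while buffer one goes back to
 * the hardware.  "hdrs" and "hdr_len" are hypothetical names.
 */
static void example_prepend_hdrs(struct sk_buff *skb2,
				 const void *hdrs, unsigned int hdr_len)
{
	memcpy(skb_push(skb2, hdr_len), hdrs, hdr_len);
}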
@@ -2205,13 +2205,13 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
			skb_push(skb2, size), size);
	} else {
		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
		if (checksum &
			(IB_IP_IOCB_RSP_3032_ICE |
			IB_IP_IOCB_RSP_3032_CE)) {
			printk(KERN_ERR
				"%s: Bad checksum for this %s packet, checksum = %x.\n",
				__func__,
				((checksum &
				IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
				"UDP"),checksum);
		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
@@ -2394,12 +2394,12 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
}
/*
 * Get the total number of segments needed for the
 * given number of fragments. This is necessary because
 * outbound address lists (OAL) will be used when more than
 * two frags are given. Each address list has 5 addr/len
 * pairs. The 5th pair in each AOL is used to point to
 * the next AOL if more frags are coming.
 * That is why the frags:segment count ratio is not linear.
 */
static int ql_get_seg_count(struct ql3_adapter *qdev,
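
The body of ql_get_seg_count() is not part of this hunk, but the non-linear ratio the comment mentions follows directly from the capacity rules it describes: three addr/len pairs in the IOCB, five per OAL, and the last pair of a full list spent on a chain pointer. A rough illustration, under the assumption that a trailing OAL may use all five of its pairs for data when nothing is chained after it:

/*
 * Illustration only, not the driver's code: count the addr/len pairs
 * consumed for "buffers" = skb->data plus one entry per page fragment.
 */
static int example_pairs_used(int buffers)
{
	int pairs;

	if (buffers <= 3)
		return buffers;	/* everything fits in the IOCB itself */

	pairs = 3;		/* 2 data pairs + 1 chain pointer in the IOCB */
	buffers -= 2;
	while (buffers > 5) {	/* a full OAL: 4 data pairs + 1 chain pointer */
		pairs += 5;
		buffers -= 4;
	}
	return pairs + buffers;	/* the final OAL carries the remainder */
}

Three buffers cost three pairs, but a fourth buffer jumps the total to five, since a chain pointer and a fresh OAL come with it; that jump is the non-linearity the comment is pointing at.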
@@ -2476,12 +2476,12 @@ static int ql_send_map(struct ql3_adapter *qdev,
	err = pci_dma_mapping_error(map);
	if(err) {
		printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
			qdev->ndev->name, err);
		return NETDEV_TX_BUSY;
	}
	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
@@ -2511,7 +2511,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
			err = pci_dma_mapping_error(map);
			if(err) {
				printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
					qdev->ndev->name, err);
				goto map_error;
			}
@@ -2537,7 +2537,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
		err = pci_dma_mapping_error(map);
		if(err) {
			printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
				qdev->ndev->name, err);
			goto map_error;
		}
@@ -2558,10 +2558,10 @@ static int ql_send_map(struct ql3_adapter *qdev,
map_error:
	/* A PCI mapping failed and now we will need to back out
	 * We need to traverse through the oal's and associated pages which
	 * have been mapped and now we must unmap them to clean up properly
	 */
	seg = 1;
	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal = tx_cb->oal;
@@ -2599,11 +2599,11 @@ map_error:
 * The difference between 3022 and 3032 sends:
 * 3022 only supports a simple single segment transmission.
 * 3032 supports checksumming and scatter/gather lists (fragments).
 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
 * in the IOCB plus a chain of outbound address lists (OAL) that
 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
 * will used to point to an OAL when more ALP entries are required.
 * The IOCB is always the top of the chain followed by one or more
 * OALs (when necessary).
 */
static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
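
For a concrete instance of the chaining described above (under the same assumption as the earlier sketch, that a trailing OAL may fill all five of its pairs with data), an skb with six page fragments needs seven buffer descriptions and lays out like this:

/*
 * skb->data + 6 page fragments = 7 buffers to describe:
 *
 *   IOCB ALP 1-2   -> skb->data, frag 0          (2 data pairs)
 *   IOCB ALP 3     -> chain pointer to OAL #1    (1 chain pair)
 *   OAL #1 ALP 1-5 -> frags 1 through 5          (5 data pairs)
 *
 * A seventh fragment would force OAL #1 to give up its fifth pair as a
 * chain pointer to OAL #2, which is another way of seeing why the
 * frags:segment ratio handled by ql_get_seg_count() is not linear.
 */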
@@ -2617,14 +2617,14 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
	if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
		return NETDEV_TX_BUSY;
	}
	tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
	if((tx_cb->seg_count = ql_get_seg_count(qdev,
			(skb_shinfo(skb)->nr_frags))) == -1) {
		printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
		return NETDEV_TX_OK;
	}
	mac_iocb_ptr = tx_cb->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
@@ -2636,12 +2636,12 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
	if (qdev->device_id == QL3032_DEVICE_ID &&
		skb->ip_summed == CHECKSUM_PARTIAL)
		ql_hw_csum_setup(skb, mac_iocb_ptr);
	if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
		printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__);
		return NETDEV_TX_BUSY;
	}
	wmb();
	qdev->req_producer_index++;
	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
@@ -2739,7 +2739,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
			"%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
		return -ENOMEM;
	}
	qdev->lrg_buf_q_alloc_virt_addr =
		pci_alloc_consistent(qdev->pdev,
			qdev->lrg_buf_q_alloc_size,

drivers/net/qla3xxx.h (6 changes; Executable file → Normal file)

@@ -556,7 +556,7 @@ enum {
	IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007,
	IP_ADDR_INDEX_REG_6 = 0x0008,
	IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030,
	IP_ADDR_INDEX_REG_E = 0x0040,
};
enum {
	QL3032_PORT_CONTROL_DS = 0x0001,
@@ -1112,7 +1112,7 @@ struct ql_rcv_buf_cb {
 * OAL has 5 entries:
 * 1 thru 4 point to frags
 * fifth points to next oal.
 */
#define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1)
struct oal_entry {
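
A worked example of the macro just above, taking MAX_SKB_FRAGS as 18 (the usual value with 4 KiB pages at the time; that value is an assumption here, not something this patch states), shows that at most five chained OALs are ever budgeted per transmit buffer:

#include <assert.h>

#define MAX_SKB_FRAGS 18			/* assumed, see note above */
#define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1)

int main(void)
{
	/* integer division truncates: (18 - 1) / 4 + 1 == 4 + 1 == 5 */
	assert(MAX_OAL_CNT == 5);
	return 0;
}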
@@ -1137,7 +1137,7 @@ struct ql_tx_buf_cb {
	struct ob_mac_iocb_req *queue_entry ;
	int seg_count;
	struct oal *oal;
	struct map_list map[MAX_SKB_FRAGS+1];
};
/* definitions for type field */