[SCSI] csiostor: Chelsio FCoE offload driver

Signed-off-by: Naresh Kumar Inna <naresh@chelsio.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Author: Naresh Kumar Inna, 2012-11-15 22:41:18 +05:30; committed by James Bottomley
parent ce91a9234c
commit a3667aaed5
22 changed files with 19175 additions and 0 deletions

drivers/scsi/Kconfig

@@ -1812,6 +1812,7 @@ config SCSI_VIRTIO
This is the virtual HBA driver for virtio. If the kernel will
be used in a virtual machine, say Y or M.
source "drivers/scsi/csiostor/Kconfig"
endif # SCSI_LOWLEVEL

drivers/scsi/Makefile

@@ -90,6 +90,7 @@ obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
obj-$(CONFIG_SCSI_BFA_FC) += bfa/
obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/
obj-$(CONFIG_SCSI_PAS16) += pas16.o
obj-$(CONFIG_SCSI_T128) += t128.o
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o

drivers/scsi/csiostor/Kconfig

@@ -0,0 +1,19 @@
config SCSI_CHELSIO_FCOE
tristate "Chelsio Communications FCoE support"
depends on PCI && SCSI
select SCSI_FC_ATTRS
select FW_LOADER
help
This driver supports FCoE Offload functionality over
Chelsio T4-based 10Gb Converged Network Adapters.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
For customer support, please visit our customer support page at
<http://www.chelsio.com/support.html>.
Please send feedback to <linux-bugs@chelsio.com>.
To compile this driver as a module choose M here; the module
will be called csiostor.

drivers/scsi/csiostor/Makefile

@@ -0,0 +1,11 @@
#
# Chelsio FCoE driver
#
ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o
csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \
csio_hw.o csio_isr.o csio_mb.o csio_rnode.o csio_wr.o

drivers/scsi/csiostor/csio_attr.c

@@ -0,0 +1,796 @@
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <scsi/fc/fc_fs.h>
#include "csio_init.h"
static void
csio_vport_set_state(struct csio_lnode *ln);
/*
* csio_reg_rnode - Register a remote port with FC transport.
* @rn: Rnode representing remote port.
*
* Call fc_remote_port_add() to register this remote port with FC transport.
* If remote port is Initiator OR Target OR both, change the role appropriately.
*
*/
void
csio_reg_rnode(struct csio_rnode *rn)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
struct Scsi_Host *shost = csio_ln_to_shost(ln);
struct fc_rport_identifiers ids;
struct fc_rport *rport;
struct csio_service_parms *sp;
ids.node_name = wwn_to_u64(csio_rn_wwnn(rn));
ids.port_name = wwn_to_u64(csio_rn_wwpn(rn));
ids.port_id = rn->nport_id;
ids.roles = FC_RPORT_ROLE_UNKNOWN;
if (rn->role & CSIO_RNFR_INITIATOR || rn->role & CSIO_RNFR_TARGET) {
rport = rn->rport;
CSIO_ASSERT(rport != NULL);
goto update_role;
}
rn->rport = fc_remote_port_add(shost, 0, &ids);
if (!rn->rport) {
csio_ln_err(ln, "Failed to register rport = 0x%x.\n",
rn->nport_id);
return;
}
ln->num_reg_rnodes++;
rport = rn->rport;
spin_lock_irq(shost->host_lock);
*((struct csio_rnode **)rport->dd_data) = rn;
spin_unlock_irq(shost->host_lock);
sp = &rn->rn_sparm;
rport->maxframe_size = sp->csp.sp_bb_data;
if (ntohs(sp->clsp[2].cp_class) & FC_CPC_VALID)
rport->supported_classes = FC_COS_CLASS3;
else
rport->supported_classes = FC_COS_UNSPECIFIED;
update_role:
if (rn->role & CSIO_RNFR_INITIATOR)
ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
if (rn->role & CSIO_RNFR_TARGET)
ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
if (ids.roles != FC_RPORT_ROLE_UNKNOWN)
fc_remote_port_rolechg(rport, ids.roles);
rn->scsi_id = rport->scsi_target_id;
csio_ln_dbg(ln, "Remote port x%x role 0x%x registered\n",
rn->nport_id, ids.roles);
}
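/*
 * A minimal sketch (illustrative only; the helper name is hypothetical)
 * of how transport callbacks later recover the driver's rnode:
 * rport->dd_data is sized via .dd_fcrport_size in the transport
 * templates at the end of this file and holds the csio_rnode pointer
 * stored by csio_reg_rnode() above.
 */
static inline struct csio_rnode *
example_rport_to_rnode(struct fc_rport *rport)
{
	return *((struct csio_rnode **)rport->dd_data);
}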
/*
* csio_unreg_rnode - Unregister a remote port with FC transport.
* @rn: Rnode representing remote port.
*
* Call fc_remote_port_delete() to unregister this remote port with FC
* transport.
*
*/
void
csio_unreg_rnode(struct csio_rnode *rn)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
struct fc_rport *rport = rn->rport;
rn->role &= ~(CSIO_RNFR_INITIATOR | CSIO_RNFR_TARGET);
fc_remote_port_delete(rport);
ln->num_reg_rnodes--;
csio_ln_dbg(ln, "Remote port x%x un-registered\n", rn->nport_id);
}
/*
* csio_lnode_async_event - Async events from local port.
* @ln: lnode representing local port.
*
* Async events from local node that FC transport/SCSI ML
* should be made aware of (Eg: RSCN).
*/
void
csio_lnode_async_event(struct csio_lnode *ln, enum csio_ln_fc_evt fc_evt)
{
switch (fc_evt) {
case CSIO_LN_FC_RSCN:
/* Get payload of rscn from ln */
/* For each RSCN entry */
/*
* fc_host_post_event(shost,
* fc_get_event_number(),
* FCH_EVT_RSCN,
* rscn_entry);
*/
break;
case CSIO_LN_FC_LINKUP:
/* send fc_host_post_event */
/* set vport state */
if (csio_is_npiv_ln(ln))
csio_vport_set_state(ln);
break;
case CSIO_LN_FC_LINKDOWN:
/* send fc_host_post_event */
/* set vport state */
if (csio_is_npiv_ln(ln))
csio_vport_set_state(ln);
break;
case CSIO_LN_FC_ATTRIB_UPDATE:
csio_fchost_attr_init(ln);
break;
default:
break;
}
}
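/*
 * A sketch of what the commented-out RSCN posting above would look like
 * if wired up (illustrative only; rscn_entry stands in for an affected
 * port ID taken from the RSCN payload):
 */
static inline void
example_post_rscn(struct Scsi_Host *shost, u32 rscn_entry)
{
	fc_host_post_event(shost, fc_get_event_number(),
			   FCH_EVT_RSCN, rscn_entry);
}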
/*
* csio_fchost_attr_init - Initialize FC transport attributes
* @ln: Lnode.
*
*/
void
csio_fchost_attr_init(struct csio_lnode *ln)
{
struct Scsi_Host *shost = csio_ln_to_shost(ln);
fc_host_node_name(shost) = wwn_to_u64(csio_ln_wwnn(ln));
fc_host_port_name(shost) = wwn_to_u64(csio_ln_wwpn(ln));
fc_host_supported_classes(shost) = FC_COS_CLASS3;
fc_host_max_npiv_vports(shost) =
(csio_lnode_to_hw(ln))->fres_info.max_vnps;
fc_host_supported_speeds(shost) = FC_PORTSPEED_10GBIT |
FC_PORTSPEED_1GBIT;
fc_host_maxframe_size(shost) = ln->ln_sparm.csp.sp_bb_data;
memset(fc_host_supported_fc4s(shost), 0,
sizeof(fc_host_supported_fc4s(shost)));
fc_host_supported_fc4s(shost)[7] = 1;
memset(fc_host_active_fc4s(shost), 0,
sizeof(fc_host_active_fc4s(shost)));
fc_host_active_fc4s(shost)[7] = 1;
}
/*
* csio_get_host_port_id - The sysfs entry for nport_id is
* populated/cached by this function.
*/
static void
csio_get_host_port_id(struct Scsi_Host *shost)
{
struct csio_lnode *ln = shost_priv(shost);
struct csio_hw *hw = csio_lnode_to_hw(ln);
spin_lock_irq(&hw->lock);
fc_host_port_id(shost) = ln->nport_id;
spin_unlock_irq(&hw->lock);
}
/*
* csio_get_host_port_type - Return FC local port type.
* @shost: scsi host.
*
*/
static void
csio_get_host_port_type(struct Scsi_Host *shost)
{
struct csio_lnode *ln = shost_priv(shost);
struct csio_hw *hw = csio_lnode_to_hw(ln);
spin_lock_irq(&hw->lock);
if (csio_is_npiv_ln(ln))
fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
else
fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
spin_unlock_irq(&hw->lock);
}
/*
* csio_get_host_port_state - Return FC local port state.
* @shost: scsi host.
*
*/
static void
csio_get_host_port_state(struct Scsi_Host *shost)
{
struct csio_lnode *ln = shost_priv(shost);
struct csio_hw *hw = csio_lnode_to_hw(ln);
char state[16];
spin_lock_irq(&hw->lock);
csio_lnode_state_to_str(ln, state);
if (!strcmp(state, "READY"))
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
else if (!strcmp(state, "OFFLINE"))
fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
else
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
spin_unlock_irq(&hw->lock);
}
/*
* csio_get_host_speed - Return link speed to FC transport.
* @shost: scsi host.
*
*/
static void
csio_get_host_speed(struct Scsi_Host *shost)
{
struct csio_lnode *ln = shost_priv(shost);
struct csio_hw *hw = csio_lnode_to_hw(ln);
spin_lock_irq(&hw->lock);
switch (hw->pport[ln->portid].link_speed) {
case FW_PORT_CAP_SPEED_1G:
fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
break;
case FW_PORT_CAP_SPEED_10G:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
default:
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
spin_unlock_irq(&hw->lock);
}
/*
* csio_get_host_fabric_name - Return fabric name
* @shost: scsi host.
*
*/
static void
csio_get_host_fabric_name(struct Scsi_Host *shost)
{
struct csio_lnode *ln = shost_priv(shost);
struct csio_rnode *rn = NULL;
struct csio_hw *hw = csio_lnode_to_hw(ln);
spin_lock_irq(&hw->lock);
rn = csio_rnode_lookup_portid(ln, FC_FID_FLOGI);
if (rn)
fc_host_fabric_name(shost) = wwn_to_u64(csio_rn_wwnn(rn));
else
fc_host_fabric_name(shost) = 0;
spin_unlock_irq(&hw->lock);
}
/*
* csio_get_stats - Return FC transport statistics.
* @shost: scsi host.
*
*/
static struct fc_host_statistics *
csio_get_stats(struct Scsi_Host *shost)
{
struct csio_lnode *ln = shost_priv(shost);
struct csio_hw *hw = csio_lnode_to_hw(ln);
struct fc_host_statistics *fhs = &ln->fch_stats;
struct fw_fcoe_port_stats fcoe_port_stats;
uint64_t seconds;
memset(&fcoe_port_stats, 0, sizeof(struct fw_fcoe_port_stats));
csio_get_phy_port_stats(hw, ln->portid, &fcoe_port_stats);
fhs->tx_frames += (fcoe_port_stats.tx_bcast_frames +
fcoe_port_stats.tx_mcast_frames +
fcoe_port_stats.tx_ucast_frames +
fcoe_port_stats.tx_offload_frames);
fhs->tx_words += (fcoe_port_stats.tx_bcast_bytes +
fcoe_port_stats.tx_mcast_bytes +
fcoe_port_stats.tx_ucast_bytes +
fcoe_port_stats.tx_offload_bytes) /
CSIO_WORD_TO_BYTE;
fhs->rx_frames += (fcoe_port_stats.rx_bcast_frames +
fcoe_port_stats.rx_mcast_frames +
fcoe_port_stats.rx_ucast_frames);
fhs->rx_words += (fcoe_port_stats.rx_bcast_bytes +
fcoe_port_stats.rx_mcast_bytes +
fcoe_port_stats.rx_ucast_bytes) /
CSIO_WORD_TO_BYTE;
fhs->error_frames += fcoe_port_stats.rx_err_frames;
fhs->fcp_input_requests += ln->stats.n_input_requests;
fhs->fcp_output_requests += ln->stats.n_output_requests;
fhs->fcp_control_requests += ln->stats.n_control_requests;
fhs->fcp_input_megabytes += ln->stats.n_input_bytes >> 20;
fhs->fcp_output_megabytes += ln->stats.n_output_bytes >> 20;
fhs->link_failure_count = ln->stats.n_link_down;
/* Reset stats for the device */
seconds = jiffies_to_msecs(jiffies) - hw->stats.n_reset_start;
do_div(seconds, 1000);
fhs->seconds_since_last_reset = seconds;
return fhs;
}
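/*
 * A note on the do_div() idiom above: dividing a u64 with '/' is not
 * available on 32-bit kernels, so do_div() divides the u64 in place
 * and returns the 32-bit remainder. A sketch (hypothetical helper):
 */
static inline uint64_t
example_msecs_to_secs(uint64_t msecs)
{
	do_div(msecs, 1000);	/* msecs now holds whole seconds */
	return msecs;
}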
/*
* csio_set_rport_loss_tmo - Set the rport dev loss timeout
* @rport: fc rport.
* @timeout: new value for dev loss tmo.
*
* If timeout is non-zero, set dev_loss_tmo to timeout; otherwise set
* dev_loss_tmo to one.
*/
static void
csio_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
if (timeout)
rport->dev_loss_tmo = timeout;
else
rport->dev_loss_tmo = 1;
}
static void
csio_vport_set_state(struct csio_lnode *ln)
{
struct fc_vport *fc_vport = ln->fc_vport;
struct csio_lnode *pln = ln->pln;
char state[16];
/* Set fc vport state based on physical lnode */
csio_lnode_state_to_str(pln, state);
if (strcmp(state, "READY")) {
fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
return;
}
if (!(pln->flags & CSIO_LNF_NPIVSUPP)) {
fc_vport_set_state(fc_vport, FC_VPORT_NO_FABRIC_SUPP);
return;
}
/* Set fc vport state based on virtual lnode */
csio_lnode_state_to_str(ln, state);
if (strcmp(state, "READY")) {
fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
return;
}
fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}
static int
csio_fcoe_alloc_vnp(struct csio_hw *hw, struct csio_lnode *ln)
{
struct csio_lnode *pln;
struct csio_mb *mbp;
struct fw_fcoe_vnp_cmd *rsp;
int ret = 0;
int retry = 0;
/* Issue VNP cmd to alloc vport */
/* Allocate Mbox request */
spin_lock_irq(&hw->lock);
mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
if (!mbp) {
CSIO_INC_STATS(hw, n_err_nomem);
ret = -ENOMEM;
goto out;
}
pln = ln->pln;
ln->fcf_flowid = pln->fcf_flowid;
ln->portid = pln->portid;
csio_fcoe_vnp_alloc_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
pln->fcf_flowid, pln->vnp_flowid, 0,
csio_ln_wwnn(ln), csio_ln_wwpn(ln), NULL);
for (retry = 0; retry < 3; retry++) {
/* FW is expected to complete the VNP command in immediate mode
* without much delay. Otherwise, there will be an increase in I/O
* latency, since the HW lock is held until the VNP mailbox command
* completes.
*/
ret = csio_mb_issue(hw, mbp);
if (ret != -EBUSY)
break;
/* Retry if mbox returns busy */
spin_unlock_irq(&hw->lock);
msleep(2000);
spin_lock_irq(&hw->lock);
}
if (ret) {
csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n");
goto out_free;
}
/* Process Mbox response of VNP command */
rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
csio_ln_err(ln, "FCOE VNP ALLOC cmd returned 0x%x!\n",
FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)));
ret = -EINVAL;
goto out_free;
}
ln->vnp_flowid = FW_FCOE_VNP_CMD_VNPI_GET(
ntohl(rsp->gen_wwn_to_vnpi));
memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);
csio_ln_dbg(ln, "FCOE VNPI: 0x%x\n", ln->vnp_flowid);
csio_ln_dbg(ln, "\tWWNN: %x%x%x%x%x%x%x%x\n",
ln->ln_sparm.wwnn[0], ln->ln_sparm.wwnn[1],
ln->ln_sparm.wwnn[2], ln->ln_sparm.wwnn[3],
ln->ln_sparm.wwnn[4], ln->ln_sparm.wwnn[5],
ln->ln_sparm.wwnn[6], ln->ln_sparm.wwnn[7]);
csio_ln_dbg(ln, "\tWWPN: %x%x%x%x%x%x%x%x\n",
ln->ln_sparm.wwpn[0], ln->ln_sparm.wwpn[1],
ln->ln_sparm.wwpn[2], ln->ln_sparm.wwpn[3],
ln->ln_sparm.wwpn[4], ln->ln_sparm.wwpn[5],
ln->ln_sparm.wwpn[6], ln->ln_sparm.wwpn[7]);
out_free:
mempool_free(mbp, hw->mb_mempool);
out:
spin_unlock_irq(&hw->lock);
return ret;
}
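/*
 * The mailbox retry idiom above, isolated as a sketch (hypothetical
 * helper, illustrative only): csio_mb_issue() is called with hw->lock
 * held, and the lock is dropped across msleep() so other paths can make
 * progress while waiting for a free mailbox.
 */
static int
example_issue_mb_with_retry(struct csio_hw *hw, struct csio_mb *mbp)
{
	int ret, retry;

	for (retry = 0; retry < 3; retry++) {
		ret = csio_mb_issue(hw, mbp);
		if (ret != -EBUSY)
			break;
		spin_unlock_irq(&hw->lock);	/* never sleep under the lock */
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}
	return ret;
}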
static int
csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln)
{
struct csio_lnode *pln;
struct csio_mb *mbp;
struct fw_fcoe_vnp_cmd *rsp;
int ret = 0;
int retry = 0;
/* Issue VNP cmd to free vport */
/* Allocate Mbox request */
spin_lock_irq(&hw->lock);
mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
if (!mbp) {
CSIO_INC_STATS(hw, n_err_nomem);
ret = -ENOMEM;
goto out;
}
pln = ln->pln;
csio_fcoe_vnp_free_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
ln->fcf_flowid, ln->vnp_flowid,
NULL);
for (retry = 0; retry < 3; retry++) {
ret = csio_mb_issue(hw, mbp);
if (ret != -EBUSY)
break;
/* Retry if mbox returns busy */
spin_unlock_irq(&hw->lock);
msleep(2000);
spin_lock_irq(&hw->lock);
}
if (ret) {
csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n");
goto out_free;
}
/* Process Mbox response of VNP command */
rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
csio_ln_err(ln, "FCOE VNP FREE cmd returned 0x%x!\n",
FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)));
ret = -EINVAL;
}
out_free:
mempool_free(mbp, hw->mb_mempool);
out:
spin_unlock_irq(&hw->lock);
return ret;
}
static int
csio_vport_create(struct fc_vport *fc_vport, bool disable)
{
struct Scsi_Host *shost = fc_vport->shost;
struct csio_lnode *pln = shost_priv(shost);
struct csio_lnode *ln = NULL;
struct csio_hw *hw = csio_lnode_to_hw(pln);
uint8_t wwn[8];
int ret = -1;
ln = csio_shost_init(hw, &fc_vport->dev, false, pln);
if (!ln)
goto error;
if (fc_vport->node_name != 0) {
u64_to_wwn(fc_vport->node_name, wwn);
if (!CSIO_VALID_WWN(wwn)) {
csio_ln_err(ln,
"vport create failed. Invalid wwnn\n");
goto error;
}
memcpy(csio_ln_wwnn(ln), wwn, 8);
}
if (fc_vport->port_name != 0) {
u64_to_wwn(fc_vport->port_name, wwn);
if (!CSIO_VALID_WWN(wwn)) {
csio_ln_err(ln,
"vport create failed. Invalid wwpn\n");
goto error;
}
if (csio_lnode_lookup_by_wwpn(hw, wwn)) {
csio_ln_err(ln,
"vport create failed. wwpn already exists\n");
goto error;
}
memcpy(csio_ln_wwpn(ln), wwn, 8);
}
fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
if (csio_fcoe_alloc_vnp(hw, ln))
goto error;
*(struct csio_lnode **)fc_vport->dd_data = ln;
ln->fc_vport = fc_vport;
if (!fc_vport->node_name)
fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
if (!fc_vport->port_name)
fc_vport->port_name = wwn_to_u64(csio_ln_wwpn(ln));
csio_fchost_attr_init(ln);
return 0;
error:
if (ln)
csio_shost_exit(ln);
return ret;
}
static int
csio_vport_delete(struct fc_vport *fc_vport)
{
struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data;
struct Scsi_Host *shost = csio_ln_to_shost(ln);
struct csio_hw *hw = csio_lnode_to_hw(ln);
int rmv;
spin_lock_irq(&hw->lock);
rmv = csio_is_hw_removing(hw);
spin_unlock_irq(&hw->lock);
if (rmv) {
csio_shost_exit(ln);
return 0;
}
/* Quiesce ios and send remove event to lnode */
scsi_block_requests(shost);
spin_lock_irq(&hw->lock);
csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln);
csio_lnode_close(ln);
spin_unlock_irq(&hw->lock);
scsi_unblock_requests(shost);
/* Free vnp */
if (fc_vport->vport_state != FC_VPORT_DISABLED)
csio_fcoe_free_vnp(hw, ln);
csio_shost_exit(ln);
return 0;
}
static int
csio_vport_disable(struct fc_vport *fc_vport, bool disable)
{
struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data;
struct Scsi_Host *shost = csio_ln_to_shost(ln);
struct csio_hw *hw = csio_lnode_to_hw(ln);
/* disable vport */
if (disable) {
/* Quiesce ios and send stop event to lnode */
scsi_block_requests(shost);
spin_lock_irq(&hw->lock);
csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln);
csio_lnode_stop(ln);
spin_unlock_irq(&hw->lock);
scsi_unblock_requests(shost);
/* Free vnp */
csio_fcoe_free_vnp(hw, ln);
fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
csio_ln_err(ln, "vport disabled\n");
return 0;
} else {
/* enable vport */
fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
if (csio_fcoe_alloc_vnp(hw, ln)) {
csio_ln_err(ln, "vport enabled failed.\n");
return -1;
}
csio_ln_err(ln, "vport enabled\n");
return 0;
}
}
static void
csio_dev_loss_tmo_callbk(struct fc_rport *rport)
{
struct csio_rnode *rn;
struct csio_hw *hw;
struct csio_lnode *ln;
rn = *((struct csio_rnode **)rport->dd_data);
ln = csio_rnode_to_lnode(rn);
hw = csio_lnode_to_hw(ln);
spin_lock_irq(&hw->lock);
/* Return if the driver is being removed or the same rnode comes back online */
if (csio_is_hw_removing(hw) || csio_is_rnode_ready(rn))
goto out;
csio_ln_dbg(ln, "devloss timeout on rnode:%p portid:x%x flowid:x%x\n",
rn, rn->nport_id, csio_rn_flowid(rn));
CSIO_INC_STATS(ln, n_dev_loss_tmo);
/*
* enqueue devloss event to event worker thread to serialize all
* rnode events.
*/
if (csio_enqueue_evt(hw, CSIO_EVT_DEV_LOSS, &rn, sizeof(rn))) {
CSIO_INC_STATS(hw, n_evt_drop);
goto out;
}
if (!(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
hw->flags |= CSIO_HWF_FWEVT_PENDING;
spin_unlock_irq(&hw->lock);
schedule_work(&hw->evtq_work);
return;
}
out:
spin_unlock_irq(&hw->lock);
}
/* FC transport functions template - Physical port */
struct fc_function_template csio_fc_transport_funcs = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
.show_host_supported_fc4s = 1,
.show_host_maxframe_size = 1,
.get_host_port_id = csio_get_host_port_id,
.show_host_port_id = 1,
.get_host_port_type = csio_get_host_port_type,
.show_host_port_type = 1,
.get_host_port_state = csio_get_host_port_state,
.show_host_port_state = 1,
.show_host_active_fc4s = 1,
.get_host_speed = csio_get_host_speed,
.show_host_speed = 1,
.get_host_fabric_name = csio_get_host_fabric_name,
.show_host_fabric_name = 1,
.get_fc_host_stats = csio_get_stats,
.dd_fcrport_size = sizeof(struct csio_rnode *),
.show_rport_maxframe_size = 1,
.show_rport_supported_classes = 1,
.set_rport_dev_loss_tmo = csio_set_rport_loss_tmo,
.show_rport_dev_loss_tmo = 1,
.show_starget_port_id = 1,
.show_starget_node_name = 1,
.show_starget_port_name = 1,
.dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk,
.dd_fcvport_size = sizeof(struct csio_lnode *),
.vport_create = csio_vport_create,
.vport_disable = csio_vport_disable,
.vport_delete = csio_vport_delete,
};
/* FC transport functions template - Virtual port */
struct fc_function_template csio_fc_transport_vport_funcs = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
.show_host_supported_fc4s = 1,
.show_host_maxframe_size = 1,
.get_host_port_id = csio_get_host_port_id,
.show_host_port_id = 1,
.get_host_port_type = csio_get_host_port_type,
.show_host_port_type = 1,
.get_host_port_state = csio_get_host_port_state,
.show_host_port_state = 1,
.show_host_active_fc4s = 1,
.get_host_speed = csio_get_host_speed,
.show_host_speed = 1,
.get_host_fabric_name = csio_get_host_fabric_name,
.show_host_fabric_name = 1,
.get_fc_host_stats = csio_get_stats,
.dd_fcrport_size = sizeof(struct csio_rnode *),
.show_rport_maxframe_size = 1,
.show_rport_supported_classes = 1,
.set_rport_dev_loss_tmo = csio_set_rport_loss_tmo,
.show_rport_dev_loss_tmo = 1,
.show_starget_port_id = 1,
.show_starget_node_name = 1,
.show_starget_port_name = 1,
.dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk,
};
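/*
 * These templates are handed to the FC transport class at module init
 * time (in driver code not shown in this hunk). A sketch of the expected
 * hookup, with hypothetical names:
 */
static struct scsi_transport_template *example_fc_transport;

static int example_attach_transport(void)
{
	example_fc_transport = fc_attach_transport(&csio_fc_transport_funcs);
	if (!example_fc_transport)
		return -ENODEV;
	return 0;
}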

drivers/scsi/csiostor/csio_defs.h

@@ -0,0 +1,121 @@
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_DEFS_H__
#define __CSIO_DEFS_H__
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#define CSIO_INVALID_IDX 0xFFFFFFFF
#define CSIO_INC_STATS(elem, val) ((elem)->stats.val++)
#define CSIO_DEC_STATS(elem, val) ((elem)->stats.val--)
#define CSIO_VALID_WWN(__n) ((*__n >> 4) == 0x5 ? true : false)
#define CSIO_DID_MASK 0xFFFFFF
#define CSIO_WORD_TO_BYTE 4
#ifndef readq
static inline u64 readq(void __iomem *addr)
{
return readl(addr) + ((u64)readl(addr + 4) << 32);
}
static inline void writeq(u64 val, void __iomem *addr)
{
writel(val, addr);
writel(val >> 32, addr + 4);
}
#endif
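/*
 * Note: this readq/writeq fallback is not atomic; the 64-bit value is
 * transferred as two 32-bit accesses (low word at addr, high word at
 * addr + 4), so it is only safe for registers whose value is stable
 * across the pair of accesses.
 */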
static inline int
csio_list_deleted(struct list_head *list)
{
return ((list->next == list) && (list->prev == list));
}
#define csio_list_next(elem) (((struct list_head *)(elem))->next)
#define csio_list_prev(elem) (((struct list_head *)(elem))->prev)
/* State machine */
typedef void (*csio_sm_state_t)(void *, uint32_t);
struct csio_sm {
struct list_head sm_list;
csio_sm_state_t sm_state;
};
static inline void
csio_set_state(void *smp, void *state)
{
((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state;
}
static inline void
csio_init_state(struct csio_sm *smp, void *state)
{
csio_set_state(smp, state);
}
static inline void
csio_post_event(void *smp, uint32_t evt)
{
((struct csio_sm *)smp)->sm_state(smp, evt);
}
static inline csio_sm_state_t
csio_get_state(void *smp)
{
return ((struct csio_sm *)smp)->sm_state;
}
static inline bool
csio_match_state(void *smp, void *state)
{
return (csio_get_state(smp) == (csio_sm_state_t)state);
}
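/*
 * Usage sketch for the state-machine helpers above (illustrative only;
 * the state functions and the event value are hypothetical). A state is
 * just a function pointer; posting an event invokes the current state,
 * and a state transitions by installing the next state function.
 */
static void example_state_ready(void *smp, uint32_t evt);

static void example_state_init(void *smp, uint32_t evt)
{
	if (evt == 1)	/* hypothetical "start" event */
		csio_set_state(smp, example_state_ready);
}

static void example_state_ready(void *smp, uint32_t evt)
{
	/* handle events that are valid in the READY state */
}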
#define CSIO_ASSERT(cond) BUG_ON(!(cond))
#ifdef __CSIO_DEBUG__
#define CSIO_DB_ASSERT(__c) CSIO_ASSERT((__c))
#else
#define CSIO_DB_ASSERT(__c)
#endif
#endif /* ifndef __CSIO_DEFS_H__ */

(File diff suppressed because it is too large.)

drivers/scsi/csiostor/csio_hw.h

@@ -0,0 +1,667 @@
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_HW_H__
#define __CSIO_HW_H__
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/compiler.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/io.h>
#include <linux/spinlock_types.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include "csio_wr.h"
#include "csio_mb.h"
#include "csio_scsi.h"
#include "csio_defs.h"
#include "t4_regs.h"
#include "t4_msg.h"
/*
* An error value used by host. Should not clash with FW defined return values.
*/
#define FW_HOSTERROR 255
#define CSIO_FW_FNAME "cxgb4/t4fw.bin"
#define CSIO_CF_FNAME "cxgb4/t4-config.txt"
#define FW_VERSION_MAJOR 1
#define FW_VERSION_MINOR 2
#define FW_VERSION_MICRO 8
#define CSIO_HW_NAME "Chelsio FCoE Adapter"
#define CSIO_MAX_PFN 8
#define CSIO_MAX_PPORTS 4
#define CSIO_MAX_LUN 0xFFFF
#define CSIO_MAX_QUEUE 2048
#define CSIO_MAX_CMD_PER_LUN 32
#define CSIO_MAX_DDP_BUF_SIZE (1024 * 1024)
#define CSIO_MAX_SECTOR_SIZE 128
/* Interrupts */
#define CSIO_EXTRA_MSI_IQS 2 /* Extra iqs for INTX/MSI mode
* (Forward intr iq + fw iq) */
#define CSIO_EXTRA_VECS 2 /* non-data + FW evt */
#define CSIO_MAX_SCSI_CPU 128
#define CSIO_MAX_SCSI_QSETS (CSIO_MAX_SCSI_CPU * CSIO_MAX_PPORTS)
#define CSIO_MAX_MSIX_VECS (CSIO_MAX_SCSI_QSETS + CSIO_EXTRA_VECS)
/* Queues */
enum {
CSIO_INTR_WRSIZE = 128,
CSIO_INTR_IQSIZE = ((CSIO_MAX_MSIX_VECS + 1) * CSIO_INTR_WRSIZE),
CSIO_FWEVT_WRSIZE = 128,
CSIO_FWEVT_IQLEN = 128,
CSIO_FWEVT_FLBUFS = 64,
CSIO_FWEVT_IQSIZE = (CSIO_FWEVT_WRSIZE * CSIO_FWEVT_IQLEN),
CSIO_HW_NIQ = 1,
CSIO_HW_NFLQ = 1,
CSIO_HW_NEQ = 1,
CSIO_HW_NINTXQ = 1,
};
struct csio_msix_entries {
unsigned short vector; /* Vector assigned by pci_enable_msix */
void *dev_id; /* Priv object associated w/ this msix*/
char desc[24]; /* Description of this vector */
};
struct csio_scsi_qset {
int iq_idx; /* Ingress index */
int eq_idx; /* Egress index */
uint32_t intr_idx; /* MSIX Vector index */
};
struct csio_scsi_cpu_info {
int16_t max_cpus;
};
extern int csio_dbg_level;
extern int csio_force_master;
extern unsigned int csio_port_mask;
extern int csio_msi;
#define CSIO_VENDOR_ID 0x1425
#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00
#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF
#define CSIO_FPGA 0xA000
#define CSIO_T4_FCOE_ASIC 0x4600
#define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
EDC1 | LE | TP | MA | PM_TX | PM_RX | \
ULP_RX | CPL_SWITCH | SGE | \
ULP_TX | SF)
/*
* Hard parameters used to initialize the card in the absence of a
* configuration file.
*/
enum {
/* General */
CSIO_SGE_DBFIFO_INT_THRESH = 10,
CSIO_SGE_RX_DMA_OFFSET = 2,
CSIO_SGE_FLBUF_SIZE1 = 65536,
CSIO_SGE_FLBUF_SIZE2 = 1536,
CSIO_SGE_FLBUF_SIZE3 = 9024,
CSIO_SGE_FLBUF_SIZE4 = 9216,
CSIO_SGE_FLBUF_SIZE5 = 2048,
CSIO_SGE_FLBUF_SIZE6 = 128,
CSIO_SGE_FLBUF_SIZE7 = 8192,
CSIO_SGE_FLBUF_SIZE8 = 16384,
CSIO_SGE_TIMER_VAL_0 = 5,
CSIO_SGE_TIMER_VAL_1 = 10,
CSIO_SGE_TIMER_VAL_2 = 20,
CSIO_SGE_TIMER_VAL_3 = 50,
CSIO_SGE_TIMER_VAL_4 = 100,
CSIO_SGE_TIMER_VAL_5 = 200,
CSIO_SGE_INT_CNT_VAL_0 = 1,
CSIO_SGE_INT_CNT_VAL_1 = 4,
CSIO_SGE_INT_CNT_VAL_2 = 8,
CSIO_SGE_INT_CNT_VAL_3 = 16,
/* Storage specific - used by FW_PFVF_CMD */
CSIO_WX_CAPS = FW_CMD_CAP_PF, /* w/x all */
CSIO_R_CAPS = FW_CMD_CAP_PF, /* r all */
CSIO_NVI = 4,
CSIO_NIQ_FLINT = 34,
CSIO_NETH_CTRL = 32,
CSIO_NEQ = 66,
CSIO_NEXACTF = 32,
CSIO_CMASK = FW_PFVF_CMD_CMASK_MASK,
CSIO_PMASK = FW_PFVF_CMD_PMASK_MASK,
};
/* Slowpath events */
enum csio_evt {
CSIO_EVT_FW = 0, /* FW event */
CSIO_EVT_MBX, /* MBX event */
CSIO_EVT_SCN, /* State change notification */
CSIO_EVT_DEV_LOSS, /* Device loss event */
CSIO_EVT_MAX, /* Max supported event */
};
#define CSIO_EVT_MSG_SIZE 512
#define CSIO_EVTQ_SIZE 512
/* Event msg */
struct csio_evt_msg {
struct list_head list; /* evt queue*/
enum csio_evt type;
uint8_t data[CSIO_EVT_MSG_SIZE];
};
enum {
EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
SERNUM_LEN = 16, /* Serial # length */
EC_LEN = 16, /* E/C length */
ID_LEN = 16, /* ID length */
TRACE_LEN = 112, /* length of trace data and mask */
};
enum {
SF_PAGE_SIZE = 256, /* serial flash page size */
SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */
};
enum { MEM_EDC0, MEM_EDC1, MEM_MC };
enum {
MEMWIN0_APERTURE = 2048,
MEMWIN0_BASE = 0x1b800,
MEMWIN1_APERTURE = 32768,
MEMWIN1_BASE = 0x28000,
MEMWIN2_APERTURE = 65536,
MEMWIN2_BASE = 0x30000,
};
/* serial flash and firmware constants */
enum {
SF_ATTEMPTS = 10, /* max retries for SF operations */
/* flash command opcodes */
SF_PROG_PAGE = 2, /* program page */
SF_WR_DISABLE = 4, /* disable writes */
SF_RD_STATUS = 5, /* read status register */
SF_WR_ENABLE = 6, /* enable writes */
SF_RD_DATA_FAST = 0xb, /* read flash */
SF_RD_ID = 0x9f, /* read ID */
SF_ERASE_SECTOR = 0xd8, /* erase sector */
FW_START_SEC = 8, /* first flash sector for FW */
FW_END_SEC = 15, /* last flash sector for FW */
FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
FLASH_CFG_MAX_SIZE = 0x10000, /* max size of the flash config file */
FLASH_CFG_OFFSET = 0x1f0000,
FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE,
FPGA_FLASH_CFG_OFFSET = 0xf0000, /* in FPGA mode, the cfg file is
* at 1MB - 64KB */
FPGA_FLASH_CFG_START_SEC = FPGA_FLASH_CFG_OFFSET / SF_SEC_SIZE,
};
/*
* Flash layout.
*/
#define FLASH_START(start) ((start) * SF_SEC_SIZE)
#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)
enum {
/*
* Location of firmware image in FLASH.
*/
FLASH_FW_START_SEC = 8,
FLASH_FW_NSECS = 8,
FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
};
#undef FLASH_START
#undef FLASH_MAX_SIZE
/* Management module */
enum {
CSIO_MGMT_EQ_WRSIZE = 512,
CSIO_MGMT_IQ_WRSIZE = 128,
CSIO_MGMT_EQLEN = 64,
CSIO_MGMT_IQLEN = 64,
};
#define CSIO_MGMT_EQSIZE (CSIO_MGMT_EQLEN * CSIO_MGMT_EQ_WRSIZE)
#define CSIO_MGMT_IQSIZE (CSIO_MGMT_IQLEN * CSIO_MGMT_IQ_WRSIZE)
/* mgmt module stats */
struct csio_mgmtm_stats {
uint32_t n_abort_req; /* Total abort request */
uint32_t n_abort_rsp; /* Total abort response */
uint32_t n_close_req; /* Total close request */
uint32_t n_close_rsp; /* Total close response */
uint32_t n_err; /* Total Errors */
uint32_t n_drop; /* Total request dropped */
uint32_t n_active; /* Count of active_q */
uint32_t n_cbfn; /* Count of cbfn_q */
};
/* MGMT module */
struct csio_mgmtm {
struct csio_hw *hw; /* Pointer to HW module */
int eq_idx; /* Egress queue index */
int iq_idx; /* Ingress queue index */
int msi_vec; /* MSI vector */
struct list_head active_q; /* Outstanding ELS/CT */
struct list_head abort_q; /* Outstanding abort req */
struct list_head cbfn_q; /* Completion queue */
struct list_head mgmt_req_freelist; /* Free pool of reqs */
/* ELSCT request freelist */
struct timer_list mgmt_timer; /* MGMT timer */
struct csio_mgmtm_stats stats; /* ELS/CT stats */
};
struct csio_adap_desc {
char model_no[16];
char description[32];
};
struct pci_params {
uint16_t vendor_id;
uint16_t device_id;
uint32_t vpd_cap_addr;
uint16_t speed;
uint8_t width;
};
/* User configurable hw parameters */
struct csio_hw_params {
uint32_t sf_size; /* serial flash
* size in bytes
*/
uint32_t sf_nsec; /* # of flash sectors */
struct pci_params pci;
uint32_t log_level; /* Module-level for
* debug log.
*/
};
struct csio_vpd {
uint32_t cclk;
uint8_t ec[EC_LEN + 1];
uint8_t sn[SERNUM_LEN + 1];
uint8_t id[ID_LEN + 1];
};
struct csio_pport {
uint16_t pcap;
uint8_t portid;
uint8_t link_status;
uint16_t link_speed;
uint8_t mac[6];
uint8_t mod_type;
uint8_t rsvd1;
uint8_t rsvd2;
uint8_t rsvd3;
};
/* fcoe resource information */
struct csio_fcoe_res_info {
uint16_t e_d_tov;
uint16_t r_a_tov_seq;
uint16_t r_a_tov_els;
uint16_t r_r_tov;
uint32_t max_xchgs;
uint32_t max_ssns;
uint32_t used_xchgs;
uint32_t used_ssns;
uint32_t max_fcfs;
uint32_t max_vnps;
uint32_t used_fcfs;
uint32_t used_vnps;
};
/* HW State machine Events */
enum csio_hw_ev {
CSIO_HWE_CFG = (uint32_t)1, /* Starts off the State machine */
CSIO_HWE_INIT, /* Config done, start Init */
CSIO_HWE_INIT_DONE, /* Init Mailboxes sent, HW ready */
CSIO_HWE_FATAL, /* Fatal error during initialization */
CSIO_HWE_PCIERR_DETECTED,/* PCI error recovery detected */
CSIO_HWE_PCIERR_SLOT_RESET, /* Slot reset after PCI recovery */
CSIO_HWE_PCIERR_RESUME, /* Resume after PCI error recovery */
CSIO_HWE_QUIESCED, /* HBA quiesced */
CSIO_HWE_HBA_RESET, /* HBA reset requested */
CSIO_HWE_HBA_RESET_DONE, /* HBA reset completed */
CSIO_HWE_FW_DLOAD, /* FW download requested */
CSIO_HWE_PCI_REMOVE, /* PCI de-instantiation */
CSIO_HWE_SUSPEND, /* HW suspend for Online(hot) replacement */
CSIO_HWE_RESUME, /* HW resume for Online(hot) replacement */
CSIO_HWE_MAX, /* Max HW event */
};
/* hw stats */
struct csio_hw_stats {
uint32_t n_evt_activeq; /* Number of events in active Q */
uint32_t n_evt_freeq; /* Number of events in free Q */
uint32_t n_evt_drop; /* Number of events dropped */
uint32_t n_evt_unexp; /* Number of unexpected events */
uint32_t n_pcich_offline;/* Number of pci channel offline */
uint32_t n_lnlkup_miss; /* Number of lnode lookup miss */
uint32_t n_cpl_fw6_msg; /* Number of cpl fw6 message*/
uint32_t n_cpl_fw6_pld; /* Number of cpl fw6 payload*/
uint32_t n_cpl_unexp; /* Number of unexpected cpl */
uint32_t n_mbint_unexp; /* Number of unexpected mbox */
/* interrupt */
uint32_t n_plint_unexp; /* Number of unexpected PL */
/* interrupt */
uint32_t n_plint_cnt; /* Number of PL interrupt */
uint32_t n_int_stray; /* Number of stray interrupt */
uint32_t n_err; /* Number of hw errors */
uint32_t n_err_fatal; /* Number of fatal errors */
uint32_t n_err_nomem; /* Number of memory alloc failure */
uint32_t n_err_io; /* Number of IO failure */
enum csio_hw_ev n_evt_sm[CSIO_HWE_MAX]; /* Number of sm events */
uint64_t n_reset_start; /* Start time after the reset */
uint32_t rsvd1;
};
/* Defines for hw->flags */
#define CSIO_HWF_MASTER 0x00000001 /* This is the Master
* function for the
* card.
*/
#define CSIO_HWF_HW_INTR_ENABLED 0x00000002 /* Are HW Interrupt
* enable bit set?
*/
#define CSIO_HWF_FWEVT_PENDING 0x00000004 /* FW events pending */
#define CSIO_HWF_Q_MEM_ALLOCED 0x00000008 /* Queues have been
* allocated memory.
*/
#define CSIO_HWF_Q_FW_ALLOCED 0x00000010 /* Queues have been
* allocated in FW.
*/
#define CSIO_HWF_VPD_VALID 0x00000020 /* Valid VPD copied */
#define CSIO_HWF_DEVID_CACHED 0x00000040 /* PCI vendor & device
* id cached */
#define CSIO_HWF_FWEVT_STOP 0x00000080 /* Stop processing
* FW events
*/
#define CSIO_HWF_USING_SOFT_PARAMS 0x00000100 /* Using FW config
* params
*/
#define CSIO_HWF_HOST_INTR_ENABLED 0x00000200 /* Are host interrupts
* enabled?
*/
#define csio_is_hw_intr_enabled(__hw) \
((__hw)->flags & CSIO_HWF_HW_INTR_ENABLED)
#define csio_is_host_intr_enabled(__hw) \
((__hw)->flags & CSIO_HWF_HOST_INTR_ENABLED)
#define csio_is_hw_master(__hw) ((__hw)->flags & CSIO_HWF_MASTER)
#define csio_is_valid_vpd(__hw) ((__hw)->flags & CSIO_HWF_VPD_VALID)
#define csio_is_dev_id_cached(__hw) ((__hw)->flags & CSIO_HWF_DEVID_CACHED)
#define csio_valid_vpd_copied(__hw) ((__hw)->flags |= CSIO_HWF_VPD_VALID)
#define csio_dev_id_cached(__hw) ((__hw)->flags |= CSIO_HWF_DEVID_CACHED)
/* Defines for intr_mode */
enum csio_intr_mode {
CSIO_IM_NONE = 0,
CSIO_IM_INTX = 1,
CSIO_IM_MSI = 2,
CSIO_IM_MSIX = 3,
};
/* Master HW structure: One per function */
struct csio_hw {
struct csio_sm sm; /* State machine: should
* be the 1st member.
*/
spinlock_t lock; /* Lock for hw */
struct csio_scsim scsim; /* SCSI module*/
struct csio_wrm wrm; /* Work request module*/
struct pci_dev *pdev; /* PCI device */
void __iomem *regstart; /* Virtual address of
* register map
*/
/* SCSI queue sets */
uint32_t num_sqsets; /* Number of SCSI
* queue sets */
uint32_t num_scsi_msix_cpus; /* Number of CPUs that
* will be used
* for ingress
* processing.
*/
struct csio_scsi_qset sqset[CSIO_MAX_PPORTS][CSIO_MAX_SCSI_CPU];
struct csio_scsi_cpu_info scsi_cpu_info[CSIO_MAX_PPORTS];
uint32_t evtflag; /* Event flag */
uint32_t flags; /* HW flags */
struct csio_mgmtm mgmtm; /* management module */
struct csio_mbm mbm; /* Mailbox module */
/* Lnodes */
uint32_t num_lns; /* Number of lnodes */
struct csio_lnode *rln; /* Root lnode */
struct list_head sln_head; /* Sibling node list
* list
*/
int intr_iq_idx; /* Forward interrupt
* queue.
*/
int fwevt_iq_idx; /* FW evt queue */
struct work_struct evtq_work; /* Worker thread for
* HW events.
*/
struct list_head evt_free_q; /* freelist of evt
* elements
*/
struct list_head evt_active_q; /* active evt queue*/
/* board related info */
char name[32];
char hw_ver[16];
char model_desc[32];
char drv_version[32];
char fwrev_str[32];
uint32_t optrom_ver;
uint32_t fwrev;
uint32_t tp_vers;
char chip_ver;
uint32_t cfg_finiver;
uint32_t cfg_finicsum;
uint32_t cfg_cfcsum;
uint8_t cfg_csum_status;
uint8_t cfg_store;
enum csio_dev_state fw_state;
struct csio_vpd vpd;
uint8_t pfn; /* Physical Function
* number
*/
uint32_t port_vec; /* Port vector */
uint8_t num_pports; /* Number of physical
* ports.
*/
uint8_t rst_retries; /* Reset retries */
uint8_t cur_evt; /* current s/m evt */
uint8_t prev_evt; /* Previous s/m evt */
uint32_t dev_num; /* device number */
struct csio_pport pport[CSIO_MAX_PPORTS]; /* Ports (XGMACs) */
struct csio_hw_params params; /* Hw parameters */
struct pci_pool *scsi_pci_pool; /* PCI pool for SCSI */
mempool_t *mb_mempool; /* Mailbox memory pool*/
mempool_t *rnode_mempool; /* rnode memory pool */
/* Interrupt */
enum csio_intr_mode intr_mode; /* INTx, MSI, MSIX */
uint32_t fwevt_intr_idx; /* FW evt MSIX/interrupt
* index
*/
uint32_t nondata_intr_idx; /* nondata MSIX/intr
* idx
*/
uint8_t cfg_neq; /* FW configured number of
* egress queues
*/
uint8_t cfg_niq; /* FW configured number of
* ingress queues.
*/
struct csio_fcoe_res_info fres_info; /* Fcoe resource info */
/* MSIX vectors */
struct csio_msix_entries msix_entries[CSIO_MAX_MSIX_VECS];
struct dentry *debugfs_root; /* Debug FS */
struct csio_hw_stats stats; /* Hw statistics */
};
/* Register access macros */
#define csio_reg(_b, _r) ((_b) + (_r))
#define csio_rd_reg8(_h, _r) readb(csio_reg((_h)->regstart, (_r)))
#define csio_rd_reg16(_h, _r) readw(csio_reg((_h)->regstart, (_r)))
#define csio_rd_reg32(_h, _r) readl(csio_reg((_h)->regstart, (_r)))
#define csio_rd_reg64(_h, _r) readq(csio_reg((_h)->regstart, (_r)))
#define csio_wr_reg8(_h, _v, _r) writeb((_v), \
csio_reg((_h)->regstart, (_r)))
#define csio_wr_reg16(_h, _v, _r) writew((_v), \
csio_reg((_h)->regstart, (_r)))
#define csio_wr_reg32(_h, _v, _r) writel((_v), \
csio_reg((_h)->regstart, (_r)))
#define csio_wr_reg64(_h, _v, _r) writeq((_v), \
csio_reg((_h)->regstart, (_r)))
void csio_set_reg_field(struct csio_hw *, uint32_t, uint32_t, uint32_t);
/* Core clocks <==> uSecs */
static inline uint32_t
csio_core_ticks_to_us(struct csio_hw *hw, uint32_t ticks)
{
/* add Core Clock / 2 to round ticks to nearest uS */
return (ticks * 1000 + hw->vpd.cclk/2) / hw->vpd.cclk;
}
static inline uint32_t
csio_us_to_core_ticks(struct csio_hw *hw, uint32_t us)
{
return (us * hw->vpd.cclk) / 1000;
}
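/*
 * Worked example, assuming vpd.cclk holds the core clock in kHz (as in
 * cxgb4): with cclk = 200000 (200 MHz), 1000 ticks convert to
 * (1000 * 1000 + 100000) / 200000 = 5 us; the cclk/2 term rounds the
 * result to the nearest microsecond.
 */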
/* Easy access macros */
#define csio_hw_to_wrm(hw) ((struct csio_wrm *)(&(hw)->wrm))
#define csio_hw_to_mbm(hw) ((struct csio_mbm *)(&(hw)->mbm))
#define csio_hw_to_scsim(hw) ((struct csio_scsim *)(&(hw)->scsim))
#define csio_hw_to_mgmtm(hw) ((struct csio_mgmtm *)(&(hw)->mgmtm))
#define CSIO_PCI_BUS(hw) ((hw)->pdev->bus->number)
#define CSIO_PCI_DEV(hw) (PCI_SLOT((hw)->pdev->devfn))
#define CSIO_PCI_FUNC(hw) (PCI_FUNC((hw)->pdev->devfn))
#define csio_set_fwevt_intr_idx(_h, _i) ((_h)->fwevt_intr_idx = (_i))
#define csio_get_fwevt_intr_idx(_h) ((_h)->fwevt_intr_idx)
#define csio_set_nondata_intr_idx(_h, _i) ((_h)->nondata_intr_idx = (_i))
#define csio_get_nondata_intr_idx(_h) ((_h)->nondata_intr_idx)
/* Printing/logging */
#define CSIO_DEVID(__dev) ((__dev)->dev_num)
#define CSIO_DEVID_LO(__dev) (CSIO_DEVID((__dev)) & 0xFFFF)
#define CSIO_DEVID_HI(__dev) ((CSIO_DEVID((__dev)) >> 16) & 0xFFFF)
#define csio_info(__hw, __fmt, ...) \
dev_info(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
#define csio_fatal(__hw, __fmt, ...) \
dev_crit(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
#define csio_err(__hw, __fmt, ...) \
dev_err(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
#define csio_warn(__hw, __fmt, ...) \
dev_warn(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
#ifdef __CSIO_DEBUG__
#define csio_dbg(__hw, __fmt, ...) \
csio_info((__hw), __fmt, ##__VA_ARGS__)
#else
#define csio_dbg(__hw, __fmt, ...)
#endif
int csio_mgmt_req_lookup(struct csio_mgmtm *, struct csio_ioreq *);
void csio_hw_intr_disable(struct csio_hw *);
int csio_hw_slow_intr_handler(struct csio_hw *hw);
int csio_hw_start(struct csio_hw *);
int csio_hw_stop(struct csio_hw *);
int csio_hw_reset(struct csio_hw *);
int csio_is_hw_ready(struct csio_hw *);
int csio_is_hw_removing(struct csio_hw *);
int csio_fwevtq_handler(struct csio_hw *);
void csio_evtq_worker(struct work_struct *);
int csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type,
void *evt_msg, uint16_t len);
void csio_evtq_flush(struct csio_hw *hw);
int csio_request_irqs(struct csio_hw *);
void csio_intr_enable(struct csio_hw *);
void csio_intr_disable(struct csio_hw *, bool);
struct csio_lnode *csio_lnode_alloc(struct csio_hw *);
int csio_config_queues(struct csio_hw *);
int csio_hw_mc_read(struct csio_hw *, uint32_t,
uint32_t *, uint64_t *);
int csio_hw_edc_read(struct csio_hw *, int, uint32_t, uint32_t *,
uint64_t *);
int csio_hw_init(struct csio_hw *);
void csio_hw_exit(struct csio_hw *);
#endif /* ifndef __CSIO_HW_H__ */

(File diff suppressed because it is too large.)

drivers/scsi/csiostor/csio_init.h

@@ -0,0 +1,158 @@
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_INIT_H__
#define __CSIO_INIT_H__
#include <linux/pci.h>
#include <linux/if_ether.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "csio_scsi.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
#include "csio_hw.h"
#define CSIO_DRV_AUTHOR "Chelsio Communications"
#define CSIO_DRV_LICENSE "Dual BSD/GPL"
#define CSIO_DRV_DESC "Chelsio FCoE driver"
#define CSIO_DRV_VERSION "1.0.0"
#define CSIO_DEVICE(devid, idx) \
{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
#define CSIO_IS_T4_FPGA(_dev) (((_dev) == CSIO_DEVID_PE10K) ||\
((_dev) == CSIO_DEVID_PE10K_PF1))
/* FCoE device IDs */
#define CSIO_DEVID_PE10K 0xA000
#define CSIO_DEVID_PE10K_PF1 0xA001
#define CSIO_DEVID_T440DBG_FCOE 0x4600
#define CSIO_DEVID_T420CR_FCOE 0x4601
#define CSIO_DEVID_T422CR_FCOE 0x4602
#define CSIO_DEVID_T440CR_FCOE 0x4603
#define CSIO_DEVID_T420BCH_FCOE 0x4604
#define CSIO_DEVID_T440BCH_FCOE 0x4605
#define CSIO_DEVID_T440CH_FCOE 0x4606
#define CSIO_DEVID_T420SO_FCOE 0x4607
#define CSIO_DEVID_T420CX_FCOE 0x4608
#define CSIO_DEVID_T420BT_FCOE 0x4609
#define CSIO_DEVID_T404BT_FCOE 0x460A
#define CSIO_DEVID_B420_FCOE 0x460B
#define CSIO_DEVID_B404_FCOE 0x460C
#define CSIO_DEVID_T480CR_FCOE 0x460D
#define CSIO_DEVID_T440LPCR_FCOE 0x460E
extern struct fc_function_template csio_fc_transport_funcs;
extern struct fc_function_template csio_fc_transport_vport_funcs;
void csio_fchost_attr_init(struct csio_lnode *);
/* INTx handlers */
void csio_scsi_intx_handler(struct csio_hw *, void *, uint32_t,
struct csio_fl_dma_buf *, void *);
void csio_fwevt_intx_handler(struct csio_hw *, void *, uint32_t,
struct csio_fl_dma_buf *, void *);
/* Common os lnode APIs */
void csio_lnodes_block_request(struct csio_hw *);
void csio_lnodes_unblock_request(struct csio_hw *);
void csio_lnodes_block_by_port(struct csio_hw *, uint8_t);
void csio_lnodes_unblock_by_port(struct csio_hw *, uint8_t);
struct csio_lnode *csio_shost_init(struct csio_hw *, struct device *, bool,
struct csio_lnode *);
void csio_shost_exit(struct csio_lnode *);
void csio_lnodes_exit(struct csio_hw *, bool);
static inline struct Scsi_Host *
csio_ln_to_shost(struct csio_lnode *ln)
{
return container_of((void *)ln, struct Scsi_Host, hostdata[0]);
}
/* SCSI -- locking version of get/put ioreqs */
static inline struct csio_ioreq *
csio_get_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim)
{
struct csio_ioreq *ioreq;
unsigned long flags;
spin_lock_irqsave(&scsim->freelist_lock, flags);
ioreq = csio_get_scsi_ioreq(scsim);
spin_unlock_irqrestore(&scsim->freelist_lock, flags);
return ioreq;
}
static inline void
csio_put_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim,
struct csio_ioreq *ioreq)
{
unsigned long flags;
spin_lock_irqsave(&scsim->freelist_lock, flags);
csio_put_scsi_ioreq(scsim, ioreq);
spin_unlock_irqrestore(&scsim->freelist_lock, flags);
}
/* Called in interrupt context */
static inline void
csio_put_scsi_ioreq_list_lock(struct csio_hw *hw, struct csio_scsim *scsim,
struct list_head *reqlist, int n)
{
unsigned long flags;
spin_lock_irqsave(&scsim->freelist_lock, flags);
csio_put_scsi_ioreq_list(scsim, reqlist, n);
spin_unlock_irqrestore(&scsim->freelist_lock, flags);
}
/* Called in interrupt context */
static inline void
csio_put_scsi_ddp_list_lock(struct csio_hw *hw, struct csio_scsim *scsim,
struct list_head *reqlist, int n)
{
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
csio_put_scsi_ddp_list(scsim, reqlist, n);
spin_unlock_irqrestore(&hw->lock, flags);
}
#endif /* ifndef __CSIO_INIT_H__ */

drivers/scsi/csiostor/csio_isr.c

@@ -0,0 +1,624 @@
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include "csio_init.h"
#include "csio_hw.h"
static irqreturn_t
csio_nondata_isr(int irq, void *dev_id)
{
struct csio_hw *hw = (struct csio_hw *) dev_id;
int rv;
unsigned long flags;
if (unlikely(!hw))
return IRQ_NONE;
if (unlikely(pci_channel_offline(hw->pdev))) {
CSIO_INC_STATS(hw, n_pcich_offline);
return IRQ_NONE;
}
spin_lock_irqsave(&hw->lock, flags);
csio_hw_slow_intr_handler(hw);
rv = csio_mb_isr_handler(hw);
if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
hw->flags |= CSIO_HWF_FWEVT_PENDING;
spin_unlock_irqrestore(&hw->lock, flags);
schedule_work(&hw->evtq_work);
return IRQ_HANDLED;
}
spin_unlock_irqrestore(&hw->lock, flags);
return IRQ_HANDLED;
}
/*
* csio_fwevt_handler - Common FW event handler routine.
* @hw: HW module.
*
* This is the ISR for FW events. It is shared b/w MSIX
* and INTx handlers.
*/
static void
csio_fwevt_handler(struct csio_hw *hw)
{
int rv;
unsigned long flags;
rv = csio_fwevtq_handler(hw);
spin_lock_irqsave(&hw->lock, flags);
if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
hw->flags |= CSIO_HWF_FWEVT_PENDING;
spin_unlock_irqrestore(&hw->lock, flags);
schedule_work(&hw->evtq_work);
return;
}
spin_unlock_irqrestore(&hw->lock, flags);
} /* csio_fwevt_handler */
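/*
 * Note on the CSIO_HWF_FWEVT_PENDING idiom above (shared by the other
 * interrupt paths in this file): the flag is tested and set under
 * hw->lock, so only the first handler to find new FW events schedules
 * evtq_work; the flag is presumably cleared again once the event worker
 * drains the queue.
 */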
/*
* csio_fwevt_isr() - FW events MSIX ISR
* @irq:
* @dev_id:
*
* Process WRs on the FW event queue.
*
*/
static irqreturn_t
csio_fwevt_isr(int irq, void *dev_id)
{
struct csio_hw *hw = (struct csio_hw *) dev_id;
if (unlikely(!hw))
return IRQ_NONE;
if (unlikely(pci_channel_offline(hw->pdev))) {
CSIO_INC_STATS(hw, n_pcich_offline);
return IRQ_NONE;
}
csio_fwevt_handler(hw);
return IRQ_HANDLED;
}
/*
* csio_fwevt_intx_handler() - INTx wrapper for handling FW events.
* @irq:
* @dev_id:
*/
void
csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
struct csio_fl_dma_buf *flb, void *priv)
{
csio_fwevt_handler(hw);
} /* csio_fwevt_intx_handler */
/*
* csio_process_scsi_cmpl - Process a SCSI WR completion.
* @hw: HW module.
* @wr: The completed WR from the ingress queue.
* @len: Length of the WR.
* @flb: Freelist buffer array.
*
*/
static void
csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
struct csio_fl_dma_buf *flb, void *cbfn_q)
{
struct csio_ioreq *ioreq;
uint8_t *scsiwr;
uint8_t subop;
void *cmnd;
unsigned long flags;
ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
if (likely(ioreq)) {
if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
((struct fw_scsi_abrt_cls_wr *)
scsiwr)->sub_opcode_to_chk_all_io);
csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
subop ? "Close" : "Abort",
ioreq, ioreq->wr_status);
spin_lock_irqsave(&hw->lock, flags);
if (subop)
csio_scsi_closed(ioreq,
(struct list_head *)cbfn_q);
else
csio_scsi_aborted(ioreq,
(struct list_head *)cbfn_q);
/*
* We call scsi_done for I/Os whose aborts the driver thinks
* have timed out. If there is a race caused by FW completing
* the abort at the exact same time that the driver has
* detected the abort timeout, the following
* check prevents calling of scsi_done twice for the
* same command: once from the eh_abort_handler, another
* from csio_scsi_isr_handler(). This also avoids the
* need to check if csio_scsi_cmnd(req) is NULL in the
* fast path.
*/
cmnd = csio_scsi_cmnd(ioreq);
if (unlikely(cmnd == NULL))
list_del_init(&ioreq->sm.sm_list);
spin_unlock_irqrestore(&hw->lock, flags);
if (unlikely(cmnd == NULL))
csio_put_scsi_ioreq_lock(hw,
csio_hw_to_scsim(hw), ioreq);
} else {
spin_lock_irqsave(&hw->lock, flags);
csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
spin_unlock_irqrestore(&hw->lock, flags);
}
}
}
/*
* csio_scsi_isr_handler() - Common SCSI ISR handler.
* @iq: Ingress queue pointer.
*
* Processes SCSI completions on the SCSI IQ indicated by scm->iq_idx
* by calling csio_wr_process_iq_idx. If there are completions on the
* isr_cbfn_q, yank them out into a local queue and call their io_cbfns.
* Once done, add these completions onto the freelist.
* This routine is shared b/w MSIX and INTx.
*/
static inline irqreturn_t
csio_scsi_isr_handler(struct csio_q *iq)
{
struct csio_hw *hw = (struct csio_hw *)iq->owner;
LIST_HEAD(cbfn_q);
struct list_head *tmp;
struct csio_scsim *scm;
struct csio_ioreq *ioreq;
int isr_completions = 0;
scm = csio_hw_to_scsim(hw);
if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
&cbfn_q) != 0))
return IRQ_NONE;
/* Call back the completion routines */
list_for_each(tmp, &cbfn_q) {
ioreq = (struct csio_ioreq *)tmp;
isr_completions++;
ioreq->io_cbfn(hw, ioreq);
/* Release ddp buffer if used for this req */
if (unlikely(ioreq->dcopy))
csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
ioreq->nsge);
}
if (isr_completions) {
/* Return the ioreqs back to ioreq->freelist */
csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
isr_completions);
}
return IRQ_HANDLED;
}
/*
* csio_scsi_isr() - SCSI MSIX handler
* @irq:
* @dev_id:
*
* This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
* for handling SCSI completions.
*/
static irqreturn_t
csio_scsi_isr(int irq, void *dev_id)
{
struct csio_q *iq = (struct csio_q *) dev_id;
struct csio_hw *hw;
if (unlikely(!iq))
return IRQ_NONE;
hw = (struct csio_hw *)iq->owner;
if (unlikely(pci_channel_offline(hw->pdev))) {
CSIO_INC_STATS(hw, n_pcich_offline);
return IRQ_NONE;
}
csio_scsi_isr_handler(iq);
return IRQ_HANDLED;
}
/*
* csio_scsi_intx_handler() - SCSI INTx handler
* @irq:
* @dev_id:
*
* This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler()
* for handling SCSI completions.
*/
void
csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
struct csio_fl_dma_buf *flb, void *priv)
{
struct csio_q *iq = priv;
csio_scsi_isr_handler(iq);
} /* csio_scsi_intx_handler */
/*
* csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
* @irq:
* @dev_id:
*
*
*/
static irqreturn_t
csio_fcoe_isr(int irq, void *dev_id)
{
struct csio_hw *hw = (struct csio_hw *) dev_id;
struct csio_q *intx_q = NULL;
int rv;
irqreturn_t ret = IRQ_NONE;
unsigned long flags;
if (unlikely(!hw))
return IRQ_NONE;
if (unlikely(pci_channel_offline(hw->pdev))) {
CSIO_INC_STATS(hw, n_pcich_offline);
return IRQ_NONE;
}
/* Disable the interrupt for this PCI function. */
if (hw->intr_mode == CSIO_IM_INTX)
csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI));
/*
* The read in the following function will flush the
* above write.
*/
if (csio_hw_slow_intr_handler(hw))
ret = IRQ_HANDLED;
/* Get the INTx Forward interrupt IQ. */
intx_q = csio_get_q(hw, hw->intr_iq_idx);
CSIO_DB_ASSERT(intx_q);
/* IQ handler is not possible for intx_q, hence pass in NULL */
if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
ret = IRQ_HANDLED;
spin_lock_irqsave(&hw->lock, flags);
rv = csio_mb_isr_handler(hw);
if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
hw->flags |= CSIO_HWF_FWEVT_PENDING;
spin_unlock_irqrestore(&hw->lock, flags);
schedule_work(&hw->evtq_work);
return IRQ_HANDLED;
}
spin_unlock_irqrestore(&hw->lock, flags);
return ret;
}
static void
csio_add_msix_desc(struct csio_hw *hw)
{
int i;
struct csio_msix_entries *entryp = &hw->msix_entries[0];
int k = CSIO_EXTRA_VECS;
int len = sizeof(entryp->desc) - 1;
int cnt = hw->num_sqsets + k;
/* Non-data vector */
memset(entryp->desc, 0, len + 1);
snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
entryp++;
memset(entryp->desc, 0, len + 1);
snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
entryp++;
/* Name SCSI vecs */
for (i = k; i < cnt; i++, entryp++) {
memset(entryp->desc, 0, len + 1);
snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
}
}
int
csio_request_irqs(struct csio_hw *hw)
{
int rv, i, j, k = 0;
struct csio_msix_entries *entryp = &hw->msix_entries[0];
struct csio_scsi_cpu_info *info;
if (hw->intr_mode != CSIO_IM_MSIX) {
rv = request_irq(hw->pdev->irq, csio_fcoe_isr,
(hw->intr_mode == CSIO_IM_MSI) ?
0 : IRQF_SHARED,
KBUILD_MODNAME, hw);
if (rv) {
if (hw->intr_mode == CSIO_IM_MSI)
pci_disable_msi(hw->pdev);
csio_err(hw, "Failed to allocate interrupt line.\n");
return -EINVAL;
}
goto out;
}
/* Add the MSIX vector descriptions */
csio_add_msix_desc(hw);
rv = request_irq(entryp[k].vector, csio_nondata_isr, 0,
entryp[k].desc, hw);
if (rv) {
csio_err(hw, "IRQ request failed for vec %d err:%d\n",
entryp[k].vector, rv);
goto err;
}
entryp[k++].dev_id = (void *)hw;
rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0,
entryp[k].desc, hw);
if (rv) {
csio_err(hw, "IRQ request failed for vec %d err:%d\n",
entryp[k].vector, rv);
goto err;
}
entryp[k++].dev_id = (void *)hw;
/* Allocate IRQs for SCSI */
for (i = 0; i < hw->num_pports; i++) {
info = &hw->scsi_cpu_info[i];
for (j = 0; j < info->max_cpus; j++, k++) {
struct csio_scsi_qset *sqset = &hw->sqset[i][j];
struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];
rv = request_irq(entryp[k].vector, csio_scsi_isr, 0,
entryp[k].desc, q);
if (rv) {
csio_err(hw,
"IRQ request failed for vec %d err:%d\n",
entryp[k].vector, rv);
goto err;
}
entryp[k].dev_id = (void *)q;
} /* for all scsi cpus */
} /* for all ports */
out:
hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
return 0;
err:
for (i = 0; i < k; i++) {
entryp = &hw->msix_entries[i];
free_irq(entryp->vector, entryp->dev_id);
}
pci_disable_msix(hw->pdev);
return -EINVAL;
}
static void
csio_disable_msix(struct csio_hw *hw, bool free)
{
int i;
struct csio_msix_entries *entryp;
int cnt = hw->num_sqsets + CSIO_EXTRA_VECS;
if (free) {
for (i = 0; i < cnt; i++) {
entryp = &hw->msix_entries[i];
free_irq(entryp->vector, entryp->dev_id);
}
}
pci_disable_msix(hw->pdev);
}
/* Reduce per-port max possible CPUs */
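/*
* e.g. with 2 ports of 4 CPUs each (8 sqsets) and cnt = 5, successive
* passes below trim one sqset per port until num_sqsets reaches 5,
* leaving the two ports with 2 and 3 sqsets respectively.
*/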
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
{
int i;
struct csio_scsi_cpu_info *info;
while (cnt < hw->num_sqsets) {
for (i = 0; i < hw->num_pports; i++) {
info = &hw->scsi_cpu_info[i];
if (info->max_cpus > 1) {
info->max_cpus--;
hw->num_sqsets--;
if (hw->num_sqsets <= cnt)
break;
}
}
}
csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
}
static int
csio_enable_msix(struct csio_hw *hw)
{
int rv, i, j, k, n, min, cnt;
struct csio_msix_entries *entryp;
struct msix_entry *entries;
int extra = CSIO_EXTRA_VECS;
struct csio_scsi_cpu_info *info;
min = hw->num_pports + extra;
cnt = hw->num_sqsets + extra;
/* Max vectors required based on #niqs configured in fw */
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
cnt = min_t(uint8_t, hw->cfg_niq, cnt);
entries = kzalloc(sizeof(struct msix_entry) * cnt, GFP_KERNEL);
if (!entries)
return -ENOMEM;
for (i = 0; i < cnt; i++)
entries[i].entry = (uint16_t)i;
csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);
while ((rv = pci_enable_msix(hw->pdev, entries, cnt)) >= min)
cnt = rv;
if (!rv) {
if (cnt < (hw->num_sqsets + extra)) {
csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
csio_reduce_sqsets(hw, cnt - extra);
}
} else {
if (rv > 0) {
pci_disable_msix(hw->pdev);
csio_info(hw, "Not using MSI-X, remainder:%d\n", rv);
}
kfree(entries);
return -ENOMEM;
}
/* Save off vectors */
for (i = 0; i < cnt; i++) {
entryp = &hw->msix_entries[i];
entryp->vector = entries[i].vector;
}
/* Distribute vectors */
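/*
* Entry 0 serves both the non-data interrupt and mailbox completions,
* entry 1 serves firmware events, and the remaining entries are handed
* out to the per-port SCSI queue sets below.
*/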
k = 0;
csio_set_nondata_intr_idx(hw, entries[k].entry);
csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry);
csio_set_fwevt_intr_idx(hw, entries[k++].entry);
for (i = 0; i < hw->num_pports; i++) {
info = &hw->scsi_cpu_info[i];
for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
n = (j % info->max_cpus) + k;
hw->sqset[i][j].intr_idx = entries[n].entry;
}
k += info->max_cpus;
}
kfree(entries);
return 0;
}
void
csio_intr_enable(struct csio_hw *hw)
{
hw->intr_mode = CSIO_IM_NONE;
hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
/* Try MSIX, then MSI or fall back to INTx */
if ((csio_msi == 2) && !csio_enable_msix(hw))
hw->intr_mode = CSIO_IM_MSIX;
else {
/* Max iqs required based on #niqs configured in fw */
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
!csio_is_hw_master(hw)) {
int extra = CSIO_EXTRA_MSI_IQS;
if (hw->cfg_niq < (hw->num_sqsets + extra)) {
csio_dbg(hw, "Reducing sqsets to %d\n",
hw->cfg_niq - extra);
csio_reduce_sqsets(hw, hw->cfg_niq - extra);
}
}
if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
hw->intr_mode = CSIO_IM_MSI;
else
hw->intr_mode = CSIO_IM_INTX;
}
csio_dbg(hw, "Using %s interrupt mode.\n",
(hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
}
void
csio_intr_disable(struct csio_hw *hw, bool free)
{
csio_hw_intr_disable(hw);
switch (hw->intr_mode) {
case CSIO_IM_MSIX:
csio_disable_msix(hw, free);
break;
case CSIO_IM_MSI:
if (free)
free_irq(hw->pdev->irq, hw);
pci_disable_msi(hw->pdev);
break;
case CSIO_IM_INTX:
if (free)
free_irq(hw->pdev->irq, hw);
break;
default:
break;
}
hw->intr_mode = CSIO_IM_NONE;
hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
}
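/*
* Illustrative sketch (not part of this commit): how a probe/remove
* path would pair the entry points above. csio_example_intr_setup()
* and csio_example_intr_teardown() are hypothetical names.
*/
static int csio_example_intr_setup(struct csio_hw *hw)
{
	csio_intr_enable(hw);		/* picks MSI-X, MSI or INTx */
	return csio_request_irqs(hw);	/* hooks up the ISRs above */
}

static void csio_example_intr_teardown(struct csio_hw *hw)
{
	csio_intr_disable(hw, true);	/* free_irq()s, then disables */
}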

File diff suppressed because it is too large

View File

@ -0,0 +1,255 @@
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_LNODE_H__
#define __CSIO_LNODE_H__
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_els.h>
#include "csio_defs.h"
#include "csio_hw.h"
#define CSIO_FCOE_MAX_NPIV 128
#define CSIO_FCOE_MAX_RNODES 2048
/* FDMI port attribute unknown speed */
#define CSIO_HBA_PORTSPEED_UNKNOWN 0x8000
extern int csio_fcoe_rnodes;
extern int csio_fdmi_enable;
/* State machine events */
enum csio_ln_ev {
CSIO_LNE_NONE = (uint32_t)0,
CSIO_LNE_LINKUP,
CSIO_LNE_FAB_INIT_DONE,
CSIO_LNE_LINK_DOWN,
CSIO_LNE_DOWN_LINK,
CSIO_LNE_LOGO,
CSIO_LNE_CLOSE,
CSIO_LNE_MAX_EVENT,
};
struct csio_fcf_info {
struct list_head list;
uint8_t priority;
uint8_t mac[6];
uint8_t name_id[8];
uint8_t fabric[8];
uint16_t vf_id;
uint8_t vlan_id;
uint16_t max_fcoe_size;
uint8_t fc_map[3];
uint32_t fka_adv;
uint32_t fcfi;
uint8_t get_next:1;
uint8_t link_aff:1;
uint8_t fpma:1;
uint8_t spma:1;
uint8_t login:1;
uint8_t portid;
uint8_t spma_mac[6];
struct kref kref;
};
/* Defines for flags */
#define CSIO_LNF_FIPSUPP 0x00000001 /* FIP supported */
#define CSIO_LNF_NPIVSUPP 0x00000002 /* NPIV supported */
#define CSIO_LNF_LINK_ENABLE 0x00000004 /* Link enabled */
#define CSIO_LNF_FDMI_ENABLE 0x00000008 /* FDMI support */
/* Transport events */
enum csio_ln_fc_evt {
CSIO_LN_FC_LINKUP = 1,
CSIO_LN_FC_LINKDOWN,
CSIO_LN_FC_RSCN,
CSIO_LN_FC_ATTRIB_UPDATE,
};
/* Lnode stats */
struct csio_lnode_stats {
uint32_t n_link_up; /* Link up */
uint32_t n_link_down; /* Link down */
uint32_t n_err; /* error */
uint32_t n_err_nomem; /* memory not available */
uint32_t n_inval_parm; /* Invalid parameters */
uint32_t n_evt_unexp; /* unexpected event */
uint32_t n_evt_drop; /* dropped event */
uint32_t n_rnode_match; /* matched rnode */
uint32_t n_dev_loss_tmo; /* Device loss timeout */
uint32_t n_fdmi_err; /* fdmi err */
uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */
enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */
uint32_t n_rnode_alloc; /* rnode allocated */
uint32_t n_rnode_free; /* rnode freed */
uint32_t n_rnode_nomem; /* rnode alloc failure */
uint32_t n_input_requests; /* Input Requests */
uint32_t n_output_requests; /* Output Requests */
uint32_t n_control_requests; /* Control Requests */
uint32_t n_input_bytes; /* Input Bytes */
uint32_t n_output_bytes; /* Output Bytes */
uint32_t rsvd1;
};
/* Common Lnode params */
struct csio_lnode_params {
uint32_t ra_tov;
uint32_t fcfi;
uint32_t log_level; /* Module level for debugging */
};
struct csio_service_parms {
struct fc_els_csp csp; /* Common service parms */
uint8_t wwpn[8]; /* WWPN */
uint8_t wwnn[8]; /* WWNN */
struct fc_els_cssp clsp[4]; /* Class service params */
uint8_t vvl[16]; /* Vendor version level */
};
/* Lnode */
struct csio_lnode {
struct csio_sm sm; /* State machine + sibling
* lnode list.
*/
struct csio_hw *hwp; /* Pointer to the HW module */
uint8_t portid; /* Port ID */
uint8_t rsvd1;
uint16_t rsvd2;
uint32_t dev_num; /* Device number */
uint32_t flags; /* Flags */
struct list_head fcf_lsthead; /* FCF entries */
struct csio_fcf_info *fcfinfo; /* FCF in use */
struct csio_ioreq *mgmt_req; /* MGMT request */
/* FCoE identifiers */
uint8_t mac[6];
uint32_t nport_id;
struct csio_service_parms ln_sparm; /* Service parms */
/* Firmware identifiers */
uint32_t fcf_flowid; /*fcf flowid */
uint32_t vnp_flowid;
uint16_t ssn_cnt; /* Registered Session */
uint8_t cur_evt; /* Current event */
uint8_t prev_evt; /* Previous event */
/* Children */
struct list_head cln_head; /* Head of the children lnode
* list.
*/
uint32_t num_vports; /* Total NPIV/children LNodes*/
struct csio_lnode *pln; /* Parent lnode of child
* lnodes.
*/
struct list_head cmpl_q; /* Pending I/Os on this lnode */
/* Remote node information */
struct list_head rnhead; /* Head of rnode list */
uint32_t num_reg_rnodes; /* Number of rnodes registered
* with the host.
*/
uint32_t n_scsi_tgts; /* Number of scsi targets
* found
*/
uint32_t last_scan_ntgts;/* Number of scsi targets
* found per last scan.
*/
uint32_t tgt_scan_tick; /* timer started after
* new tgt found
*/
/* FC transport data */
struct fc_vport *fc_vport;
struct fc_host_statistics fch_stats;
struct csio_lnode_stats stats; /* Common lnode stats */
struct csio_lnode_params params; /* Common lnode params */
};
#define csio_lnode_to_hw(ln) ((ln)->hwp)
#define csio_root_lnode(ln) (csio_lnode_to_hw((ln))->rln)
#define csio_parent_lnode(ln) ((ln)->pln)
#define csio_ln_flowid(ln) ((ln)->vnp_flowid)
#define csio_ln_wwpn(ln) ((ln)->ln_sparm.wwpn)
#define csio_ln_wwnn(ln) ((ln)->ln_sparm.wwnn)
#define csio_is_root_ln(ln) (((ln) == csio_root_lnode((ln))) ? 1 : 0)
#define csio_is_phys_ln(ln) (((ln)->pln == NULL) ? 1 : 0)
#define csio_is_npiv_ln(ln) (((ln)->pln != NULL) ? 1 : 0)
#define csio_ln_dbg(_ln, _fmt, ...) \
csio_dbg(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
#define csio_ln_err(_ln, _fmt, ...) \
csio_err(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
#define csio_ln_warn(_ln, _fmt, ...) \
csio_warn(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
/* HW->Lnode notifications */
enum csio_ln_notify {
CSIO_LN_NOTIFY_HWREADY = 1,
CSIO_LN_NOTIFY_HWSTOP,
CSIO_LN_NOTIFY_HWREMOVE,
CSIO_LN_NOTIFY_HWRESET,
};
void csio_fcoe_fwevt_handler(struct csio_hw *, __u8 cpl_op, __be64 *);
int csio_is_lnode_ready(struct csio_lnode *);
void csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str);
struct csio_lnode *csio_lnode_lookup_by_wwpn(struct csio_hw *, uint8_t *);
int csio_get_phy_port_stats(struct csio_hw *, uint8_t ,
struct fw_fcoe_port_stats *);
int csio_scan_done(struct csio_lnode *, unsigned long, unsigned long,
unsigned long, unsigned long);
void csio_notify_lnodes(struct csio_hw *, enum csio_ln_notify);
void csio_disable_lnodes(struct csio_hw *, uint8_t, bool);
void csio_lnode_async_event(struct csio_lnode *, enum csio_ln_fc_evt);
int csio_ln_fdmi_start(struct csio_lnode *, void *);
int csio_lnode_start(struct csio_lnode *);
void csio_lnode_stop(struct csio_lnode *);
void csio_lnode_close(struct csio_lnode *);
int csio_lnode_init(struct csio_lnode *, struct csio_hw *,
struct csio_lnode *);
void csio_lnode_exit(struct csio_lnode *);
#endif /* ifndef __CSIO_LNODE_H__ */

File diff suppressed because it is too large

View File

@ -0,0 +1,278 @@
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_MB_H__
#define __CSIO_MB_H__
#include <linux/timer.h>
#include <linux/completion.h>
#include "t4fw_api.h"
#include "t4fw_api_stor.h"
#include "csio_defs.h"
#define CSIO_STATS_OFFSET (2)
#define CSIO_NUM_STATS_PER_MB (6)
struct fw_fcoe_port_cmd_params {
uint8_t portid;
uint8_t idx;
uint8_t nstats;
};
#define CSIO_DUMP_MB(__hw, __num, __mb) \
csio_dbg(__hw, "\t%llx %llx %llx %llx %llx %llx %llx %llx\n", \
(unsigned long long)csio_rd_reg64(__hw, __mb), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 8), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 16), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 24), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 32), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 40), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 48), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 56))
#define CSIO_MB_MAX_REGS 8
#define CSIO_MAX_MB_SIZE 64
#define CSIO_MB_POLL_FREQ 5 /* 5 ms */
#define CSIO_MB_DEFAULT_TMO FW_CMD_MAX_TIMEOUT
/* Device master in HELLO command */
enum csio_dev_master { CSIO_MASTER_CANT, CSIO_MASTER_MAY, CSIO_MASTER_MUST };
enum csio_mb_owner { CSIO_MBOWNER_NONE, CSIO_MBOWNER_FW, CSIO_MBOWNER_PL };
enum csio_dev_state {
CSIO_DEV_STATE_UNINIT,
CSIO_DEV_STATE_INIT,
CSIO_DEV_STATE_ERR
};
#define FW_PARAM_DEV(param) \
(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
FW_PARAMS_PARAM_Y(0) | \
FW_PARAMS_PARAM_Z(0))
enum {
PAUSE_RX = 1 << 0,
PAUSE_TX = 1 << 1,
PAUSE_AUTONEG = 1 << 2
};
#define CSIO_INIT_MBP(__mbp, __cp, __tmo, __priv, __fn, __clear) \
do { \
if (__clear) \
memset((__cp), 0, \
CSIO_MB_MAX_REGS * sizeof(__be64)); \
INIT_LIST_HEAD(&(__mbp)->list); \
(__mbp)->tmo = (__tmo); \
(__mbp)->priv = (void *)(__priv); \
(__mbp)->mb_cbfn = (__fn); \
(__mbp)->mb_size = sizeof(*(__cp)); \
} while (0)
struct csio_mbm_stats {
uint32_t n_req; /* number of mbox req */
uint32_t n_rsp; /* number of mbox rsp */
uint32_t n_activeq; /* number of mbox req active Q */
uint32_t n_cbfnq; /* number of mbox req cbfn Q */
uint32_t n_tmo; /* number of mbox timeout */
uint32_t n_cancel; /* number of mbox cancel */
uint32_t n_err; /* number of mbox error */
};
/* Driver version of Mailbox */
struct csio_mb {
struct list_head list; /* for req/resp */
/* queue in driver */
__be64 mb[CSIO_MB_MAX_REGS]; /* MB in HW format */
int mb_size; /* Size of this
* mailbox.
*/
uint32_t tmo; /* Timeout */
struct completion cmplobj; /* MB Completion
* object
*/
void (*mb_cbfn) (struct csio_hw *, struct csio_mb *);
/* Callback fn */
void *priv; /* Owner private ptr */
};
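/*
* Illustrative sketch (not part of this commit): a mailbox helper
* typically overlays the firmware command on mbp->mb and then uses
* CSIO_INIT_MBP above to zero it and fill in the bookkeeping fields
* before handing it to csio_mb_issue(). csio_example_mb_init() is a
* hypothetical name; struct fw_hello_cmd comes from t4fw_api.h.
*/
static inline void
csio_example_mb_init(struct csio_hw *hw, struct csio_mb *mbp,
		     void (*cbfn)(struct csio_hw *, struct csio_mb *))
{
	struct fw_hello_cmd *cmdp = (struct fw_hello_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, CSIO_MB_DEFAULT_TMO, hw, cbfn, 1);
	/* ... fill in *cmdp here, then call csio_mb_issue(hw, mbp) ... */
}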
struct csio_mbm {
uint32_t a_mbox; /* Async mbox num */
uint32_t intr_idx; /* Interrupt index */
struct timer_list timer; /* Mbox timer */
struct list_head req_q; /* Mbox request queue */
struct list_head cbfn_q; /* Mbox completion q */
struct csio_mb *mcurrent; /* Current mailbox */
uint32_t req_q_cnt; /* Outstanding mbox
* cmds
*/
struct csio_mbm_stats stats; /* Statistics */
};
#define csio_set_mb_intr_idx(_m, _i) ((_m)->intr_idx = (_i))
#define csio_get_mb_intr_idx(_m) ((_m)->intr_idx)
struct csio_iq_params;
struct csio_eq_params;
enum fw_retval csio_mb_fw_retval(struct csio_mb *);
/* MB helpers */
void csio_mb_hello(struct csio_hw *, struct csio_mb *, uint32_t,
uint32_t, uint32_t, enum csio_dev_master,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_process_hello_rsp(struct csio_hw *, struct csio_mb *,
enum fw_retval *, enum csio_dev_state *,
uint8_t *);
void csio_mb_bye(struct csio_hw *, struct csio_mb *, uint32_t,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_reset(struct csio_hw *, struct csio_mb *, uint32_t, int, int,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_params(struct csio_hw *, struct csio_mb *, uint32_t, unsigned int,
unsigned int, unsigned int, const u32 *, u32 *, bool,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_process_read_params_rsp(struct csio_hw *, struct csio_mb *,
enum fw_retval *, unsigned int , u32 *);
void csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
int reg);
void csio_mb_caps_config(struct csio_hw *, struct csio_mb *, uint32_t,
bool, bool, bool, bool,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_rss_glb_config(struct csio_hw *, struct csio_mb *,
uint32_t, uint8_t, unsigned int,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_pfvf(struct csio_hw *, struct csio_mb *, uint32_t,
unsigned int, unsigned int, unsigned int,
unsigned int, unsigned int, unsigned int,
unsigned int, unsigned int, unsigned int,
unsigned int, unsigned int, unsigned int,
unsigned int, void (*) (struct csio_hw *, struct csio_mb *));
void csio_mb_port(struct csio_hw *, struct csio_mb *, uint32_t,
uint8_t, bool, uint32_t, uint16_t,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_mb_process_read_port_rsp(struct csio_hw *, struct csio_mb *,
enum fw_retval *, uint16_t *);
void csio_mb_initialize(struct csio_hw *, struct csio_mb *, uint32_t,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_iq_alloc_write(struct csio_hw *, struct csio_mb *, void *,
uint32_t, struct csio_iq_params *,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_mb_iq_alloc_write_rsp(struct csio_hw *, struct csio_mb *,
enum fw_retval *, struct csio_iq_params *);
void csio_mb_iq_free(struct csio_hw *, struct csio_mb *, void *,
uint32_t, struct csio_iq_params *,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_mb_eq_ofld_alloc_write(struct csio_hw *, struct csio_mb *, void *,
uint32_t, struct csio_eq_params *,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *, struct csio_mb *,
enum fw_retval *, struct csio_eq_params *);
void csio_mb_eq_ofld_free(struct csio_hw *, struct csio_mb *, void *,
uint32_t , struct csio_eq_params *,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_read_res_info_init_mb(struct csio_hw *, struct csio_mb *,
uint32_t,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_write_fcoe_link_cond_init_mb(struct csio_lnode *, struct csio_mb *,
uint32_t, uint8_t, uint32_t, uint8_t, bool, uint32_t,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *, struct csio_mb *,
uint32_t, uint32_t , uint32_t , uint16_t,
uint8_t [8], uint8_t [8],
void (*) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_vnp_read_init_mb(struct csio_lnode *, struct csio_mb *,
uint32_t, uint32_t , uint32_t ,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_vnp_free_init_mb(struct csio_lnode *, struct csio_mb *,
uint32_t , uint32_t, uint32_t ,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_read_fcf_init_mb(struct csio_lnode *, struct csio_mb *,
uint32_t, uint32_t, uint32_t,
void (*cbfn) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_read_portparams_init_mb(struct csio_hw *hw,
struct csio_mb *mbp, uint32_t mb_tmo,
struct fw_fcoe_port_cmd_params *portparams,
void (*cbfn)(struct csio_hw *, struct csio_mb *));
void csio_mb_process_portparams_rsp(struct csio_hw *hw, struct csio_mb *mbp,
enum fw_retval *retval,
struct fw_fcoe_port_cmd_params *portparams,
struct fw_fcoe_port_stats *portstats);
/* MB module functions */
int csio_mbm_init(struct csio_mbm *, struct csio_hw *,
void (*)(uintptr_t));
void csio_mbm_exit(struct csio_mbm *);
void csio_mb_intr_enable(struct csio_hw *);
void csio_mb_intr_disable(struct csio_hw *);
int csio_mb_issue(struct csio_hw *, struct csio_mb *);
void csio_mb_completions(struct csio_hw *, struct list_head *);
int csio_mb_fwevt_handler(struct csio_hw *, __be64 *);
int csio_mb_isr_handler(struct csio_hw *);
struct csio_mb *csio_mb_tmo_handler(struct csio_hw *);
void csio_mb_cancel_all(struct csio_hw *, struct list_head *);
#endif /* ifndef __CSIO_MB_H__ */

View File

@ -0,0 +1,912 @@
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/string.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fs.h>
#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *);
static void csio_rnode_exit(struct csio_rnode *);
/* State machine forward declarations */
static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev);
/* RNF event mapping */
static enum csio_rn_ev fwevt_to_rnevt[] = {
CSIO_RNFE_NONE, /* None */
CSIO_RNFE_LOGGED_IN, /* PLOGI_ACC_RCVD */
CSIO_RNFE_NONE, /* PLOGI_RJT_RCVD */
CSIO_RNFE_PLOGI_RECV, /* PLOGI_RCVD */
CSIO_RNFE_LOGO_RECV, /* PLOGO_RCVD */
CSIO_RNFE_PRLI_DONE, /* PRLI_ACC_RCVD */
CSIO_RNFE_NONE, /* PRLI_RJT_RCVD */
CSIO_RNFE_PRLI_RECV, /* PRLI_RCVD */
CSIO_RNFE_PRLO_RECV, /* PRLO_RCVD */
CSIO_RNFE_NONE, /* NPORT_ID_CHGD */
CSIO_RNFE_LOGO_RECV, /* FLOGO_RCVD */
CSIO_RNFE_NONE, /* CLR_VIRT_LNK_RCVD */
CSIO_RNFE_LOGGED_IN, /* FLOGI_ACC_RCVD */
CSIO_RNFE_NONE, /* FLOGI_RJT_RCVD */
CSIO_RNFE_LOGGED_IN, /* FDISC_ACC_RCVD */
CSIO_RNFE_NONE, /* FDISC_RJT_RCVD */
CSIO_RNFE_NONE, /* FLOGI_TMO_MAX_RETRY */
CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_ACC */
CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_RJT */
CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_CNFLT */
CSIO_RNFE_NONE, /* PRLI_TMO */
CSIO_RNFE_NONE, /* ADISC_TMO */
CSIO_RNFE_NAME_MISSING, /* RSCN_DEV_LOST */
CSIO_RNFE_NONE, /* SCR_ACC_RCVD */
CSIO_RNFE_NONE, /* ADISC_RJT_RCVD */
CSIO_RNFE_NONE, /* LOGO_SNT */
CSIO_RNFE_LOGO_RECV, /* PROTO_ERR_IMPL_LOGO */
};
#define CSIO_FWE_TO_RNFE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \
CSIO_RNFE_NONE : \
fwevt_to_rnevt[_evt])
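/*
* e.g. a firmware PLOGI_RCVD event maps to CSIO_RNFE_PLOGI_RECV, while
* any event number beyond PROTO_ERR_IMPL_LOGO falls back to
* CSIO_RNFE_NONE and is dropped by csio_rnode_fwevt_handler().
*/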
int
csio_is_rnode_ready(struct csio_rnode *rn)
{
return csio_match_state(rn, csio_rns_ready);
}
static int
csio_is_rnode_uninit(struct csio_rnode *rn)
{
return csio_match_state(rn, csio_rns_uninit);
}
static int
csio_is_rnode_wka(uint8_t rport_type)
{
if ((rport_type == FLOGI_VFPORT) ||
(rport_type == FDISC_VFPORT) ||
(rport_type == NS_VNPORT) ||
(rport_type == FDMI_VNPORT))
return 1;
return 0;
}
/*
* csio_rn_lookup - Finds the rnode with the given flowid
* @ln - lnode
* @flowid - flowid.
*
* Does the rnode lookup on the given lnode and flowid. If no matching
* entry is found, NULL is returned.
*/
static struct csio_rnode *
csio_rn_lookup(struct csio_lnode *ln, uint32_t flowid)
{
struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
struct list_head *tmp;
struct csio_rnode *rn;
list_for_each(tmp, &rnhead->sm.sm_list) {
rn = (struct csio_rnode *) tmp;
if (rn->flowid == flowid)
return rn;
}
return NULL;
}
/*
* csio_rn_lookup_wwpn - Finds the rnode with the given wwpn
* @ln: lnode
* @wwpn: wwpn
*
* Does the rnode lookup on the given lnode and wwpn. If no matching entry
* is found, NULL is returned.
*/
static struct csio_rnode *
csio_rn_lookup_wwpn(struct csio_lnode *ln, uint8_t *wwpn)
{
struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
struct list_head *tmp;
struct csio_rnode *rn;
list_for_each(tmp, &rnhead->sm.sm_list) {
rn = (struct csio_rnode *) tmp;
if (!memcmp(csio_rn_wwpn(rn), wwpn, 8))
return rn;
}
return NULL;
}
/**
* csio_rnode_lookup_portid - Finds the rnode with the given portid
* @ln: lnode
* @portid: port id
*
* Lookup the rnode list for a given portid. If no matching entry
* is found, NULL is returned.
*/
struct csio_rnode *
csio_rnode_lookup_portid(struct csio_lnode *ln, uint32_t portid)
{
struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
struct list_head *tmp;
struct csio_rnode *rn;
list_for_each(tmp, &rnhead->sm.sm_list) {
rn = (struct csio_rnode *) tmp;
if (rn->nport_id == portid)
return rn;
}
return NULL;
}
static int
csio_rn_dup_flowid(struct csio_lnode *ln, uint32_t rdev_flowid,
uint32_t *vnp_flowid)
{
struct csio_rnode *rnhead;
struct list_head *tmp, *tmp1;
struct csio_rnode *rn;
struct csio_lnode *ln_tmp;
struct csio_hw *hw = csio_lnode_to_hw(ln);
list_for_each(tmp1, &hw->sln_head) {
ln_tmp = (struct csio_lnode *) tmp1;
if (ln_tmp == ln)
continue;
rnhead = (struct csio_rnode *)&ln_tmp->rnhead;
list_for_each(tmp, &rnhead->sm.sm_list) {
rn = (struct csio_rnode *) tmp;
if (csio_is_rnode_ready(rn)) {
if (rn->flowid == rdev_flowid) {
*vnp_flowid = csio_ln_flowid(ln_tmp);
return 1;
}
}
}
}
return 0;
}
static struct csio_rnode *
csio_alloc_rnode(struct csio_lnode *ln)
{
struct csio_hw *hw = csio_lnode_to_hw(ln);
struct csio_rnode *rn = mempool_alloc(hw->rnode_mempool, GFP_ATOMIC);
if (!rn)
goto err;
memset(rn, 0, sizeof(struct csio_rnode));
if (csio_rnode_init(rn, ln))
goto err_free;
CSIO_INC_STATS(ln, n_rnode_alloc);
return rn;
err_free:
mempool_free(rn, hw->rnode_mempool);
err:
CSIO_INC_STATS(ln, n_rnode_nomem);
return NULL;
}
static void
csio_free_rnode(struct csio_rnode *rn)
{
struct csio_hw *hw = csio_lnode_to_hw(csio_rnode_to_lnode(rn));
csio_rnode_exit(rn);
CSIO_INC_STATS(rn->lnp, n_rnode_free);
mempool_free(rn, hw->rnode_mempool);
}
/*
* csio_get_rnode - Gets rnode with the given flowid
* @ln - lnode
* @flowid - flow id.
*
* Does the rnode lookup on the given lnode and flowid. If no matching
* rnode is found, a new rnode with the given flowid is allocated and
* returned.
*/
static struct csio_rnode *
csio_get_rnode(struct csio_lnode *ln, uint32_t flowid)
{
struct csio_rnode *rn;
rn = csio_rn_lookup(ln, flowid);
if (!rn) {
rn = csio_alloc_rnode(ln);
if (!rn)
return NULL;
rn->flowid = flowid;
}
return rn;
}
/*
* csio_put_rnode - Frees the given rnode
* @ln - lnode
* @rn - rnode to be freed.
*
* The rnode must be in the uninit state when it is freed.
*/
void
csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn)
{
CSIO_DB_ASSERT(csio_is_rnode_uninit(rn) != 0);
csio_free_rnode(rn);
}
/*
* csio_confirm_rnode - confirms rnode based on wwpn.
* @ln: lnode
* @rdev_flowid: remote device flowid
* @rdevp: remote device params
*
* This routine searches the rnode list for an entry with the same wwpn
* as the new rnode. If a match is found, the matched rnode is returned;
* otherwise a new rnode is allocated and returned.
*/
struct csio_rnode *
csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
struct fcoe_rdev_entry *rdevp)
{
uint8_t rport_type;
struct csio_rnode *rn, *match_rn;
uint32_t vnp_flowid;
uint32_t *port_id;
port_id = (uint32_t *)&rdevp->r_id[0];
rport_type =
FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
/* Drop rdev events for the fabric control port */
if (rport_type == FAB_CTLR_VNPORT) {
csio_ln_dbg(ln,
"Unhandled rport_type:%d recv in rdev evt "
"ssni:x%x\n", rport_type, rdev_flowid);
return NULL;
}
/* Lookup on flowid */
rn = csio_rn_lookup(ln, rdev_flowid);
if (!rn) {
/* Drop events with duplicate flowid */
if (csio_rn_dup_flowid(ln, rdev_flowid, &vnp_flowid)) {
csio_ln_warn(ln,
"ssni:%x already active on vnpi:%x",
rdev_flowid, vnp_flowid);
return NULL;
}
/* Lookup on wwpn for NPORTs */
rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
if (!rn)
goto alloc_rnode;
} else {
/* Lookup well-known ports with nport id */
if (csio_is_rnode_wka(rport_type)) {
match_rn = csio_rnode_lookup_portid(ln,
((ntohl(*port_id) >> 8) & CSIO_DID_MASK));
if (match_rn == NULL) {
csio_rn_flowid(rn) = CSIO_INVALID_IDX;
goto alloc_rnode;
}
/*
* Now compare the wwpn to confirm that the
* same port has logged in again. If so, update the matched rn.
* Else, go ahead and alloc a new rnode.
*/
if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {
if (csio_is_rnode_ready(rn)) {
csio_ln_warn(ln,
"rnode is already"
"active ssni:x%x\n",
rdev_flowid);
CSIO_ASSERT(0);
}
csio_rn_flowid(rn) = CSIO_INVALID_IDX;
rn = match_rn;
/* Update rn */
goto found_rnode;
}
csio_rn_flowid(rn) = CSIO_INVALID_IDX;
goto alloc_rnode;
}
/* wwpn match */
if (!memcmp(csio_rn_wwpn(rn), rdevp->wwpn, 8))
goto found_rnode;
/* Search for an rnode that has the same wwpn */
match_rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
if (match_rn != NULL) {
csio_ln_dbg(ln,
"ssni:x%x changed for rport name(wwpn):%llx "
"did:x%x\n", rdev_flowid,
wwn_to_u64(rdevp->wwpn),
match_rn->nport_id);
csio_rn_flowid(rn) = CSIO_INVALID_IDX;
rn = match_rn;
} else {
csio_ln_dbg(ln,
"rnode wwpn mismatch found ssni:x%x "
"name(wwpn):%llx\n",
rdev_flowid,
wwn_to_u64(csio_rn_wwpn(rn)));
if (csio_is_rnode_ready(rn)) {
csio_ln_warn(ln,
"rnode is already active "
"wwpn:%llx ssni:x%x\n",
wwn_to_u64(csio_rn_wwpn(rn)),
rdev_flowid);
CSIO_ASSERT(0);
}
csio_rn_flowid(rn) = CSIO_INVALID_IDX;
goto alloc_rnode;
}
}
found_rnode:
csio_ln_dbg(ln, "found rnode:%p ssni:x%x name(wwpn):%llx\n",
rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));
/* Update flowid */
csio_rn_flowid(rn) = rdev_flowid;
/* update rdev entry */
rn->rdev_entry = rdevp;
CSIO_INC_STATS(ln, n_rnode_match);
return rn;
alloc_rnode:
rn = csio_get_rnode(ln, rdev_flowid);
if (!rn)
return NULL;
csio_ln_dbg(ln, "alloc rnode:%p ssni:x%x name(wwpn):%llx\n",
rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));
/* update rdev entry */
rn->rdev_entry = rdevp;
return rn;
}
/*
* csio_rn_verify_rparams - verify rparams.
* @ln: lnode
* @rn: rnode
* @rdevp: remote device params
* Returns 0 on success, -EINVAL otherwise.
*/
static int
csio_rn_verify_rparams(struct csio_lnode *ln, struct csio_rnode *rn,
struct fcoe_rdev_entry *rdevp)
{
uint8_t null[8];
uint8_t rport_type;
uint8_t fc_class;
uint32_t *did;
did = (uint32_t *) &rdevp->r_id[0];
rport_type =
FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
switch (rport_type) {
case FLOGI_VFPORT:
rn->role = CSIO_RNFR_FABRIC;
if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_FLOGI) {
csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
csio_rn_flowid(rn));
return -EINVAL;
}
/* NPIV support */
if (FW_RDEV_WR_NPIV_GET(rdevp->vft_to_qos))
ln->flags |= CSIO_LNF_NPIVSUPP;
break;
case NS_VNPORT:
rn->role = CSIO_RNFR_NS;
if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_DIR_SERV) {
csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
csio_rn_flowid(rn));
return -EINVAL;
}
break;
case REG_FC4_VNPORT:
case REG_VNPORT:
rn->role = CSIO_RNFR_NPORT;
if (rdevp->event_cause == PRLI_ACC_RCVD ||
rdevp->event_cause == PRLI_RCVD) {
if (FW_RDEV_WR_TASK_RETRY_ID_GET(
rdevp->enh_disc_to_tgt))
rn->fcp_flags |= FCP_SPPF_OVLY_ALLOW;
if (FW_RDEV_WR_RETRY_GET(rdevp->enh_disc_to_tgt))
rn->fcp_flags |= FCP_SPPF_RETRY;
if (FW_RDEV_WR_CONF_CMPL_GET(rdevp->enh_disc_to_tgt))
rn->fcp_flags |= FCP_SPPF_CONF_COMPL;
if (FW_RDEV_WR_TGT_GET(rdevp->enh_disc_to_tgt))
rn->role |= CSIO_RNFR_TARGET;
if (FW_RDEV_WR_INI_GET(rdevp->enh_disc_to_tgt))
rn->role |= CSIO_RNFR_INITIATOR;
}
break;
case FDMI_VNPORT:
case FAB_CTLR_VNPORT:
rn->role = 0;
break;
default:
csio_ln_err(ln, "ssni:x%x invalid rport type recv x%x\n",
csio_rn_flowid(rn), rport_type);
return -EINVAL;
}
/* validate wwpn/wwnn for Name server/remote port */
if (rport_type == REG_VNPORT || rport_type == NS_VNPORT) {
memset(null, 0, 8);
if (!memcmp(rdevp->wwnn, null, 8)) {
csio_ln_err(ln,
"ssni:x%x invalid wwnn received from"
" rport did:x%x\n",
csio_rn_flowid(rn),
(ntohl(*did) & CSIO_DID_MASK));
return -EINVAL;
}
if (!memcmp(rdevp->wwpn, null, 8)) {
csio_ln_err(ln,
"ssni:x%x invalid wwpn received from"
" rport did:x%x\n",
csio_rn_flowid(rn),
(ntohl(*did) & CSIO_DID_MASK));
return -EINVAL;
}
}
/* Copy wwnn, wwpn and nport id */
rn->nport_id = (ntohl(*did) >> 8) & CSIO_DID_MASK;
memcpy(csio_rn_wwnn(rn), rdevp->wwnn, 8);
memcpy(csio_rn_wwpn(rn), rdevp->wwpn, 8);
rn->rn_sparm.csp.sp_bb_data = ntohs(rdevp->rcv_fr_sz);
fc_class = FW_RDEV_WR_CLASS_GET(rdevp->vft_to_qos);
rn->rn_sparm.clsp[fc_class - 1].cp_class = htons(FC_CPC_VALID);
return 0;
}
static void
__csio_reg_rnode(struct csio_rnode *rn)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
struct csio_hw *hw = csio_lnode_to_hw(ln);
spin_unlock_irq(&hw->lock);
csio_reg_rnode(rn);
spin_lock_irq(&hw->lock);
if (rn->role & CSIO_RNFR_TARGET)
ln->n_scsi_tgts++;
if (rn->nport_id == FC_FID_MGMT_SERV)
csio_ln_fdmi_start(ln, (void *) rn);
}
static void
__csio_unreg_rnode(struct csio_rnode *rn)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
struct csio_hw *hw = csio_lnode_to_hw(ln);
LIST_HEAD(tmp_q);
int cmpl = 0;
if (!list_empty(&rn->host_cmpl_q)) {
csio_dbg(hw, "Returning completion queue I/Os\n");
list_splice_tail_init(&rn->host_cmpl_q, &tmp_q);
cmpl = 1;
}
if (rn->role & CSIO_RNFR_TARGET) {
ln->n_scsi_tgts--;
ln->last_scan_ntgts--;
}
spin_unlock_irq(&hw->lock);
csio_unreg_rnode(rn);
spin_lock_irq(&hw->lock);
/* Cleanup I/Os that were waiting for rnode to unregister */
if (cmpl)
csio_scsi_cleanup_io_q(csio_hw_to_scsim(hw), &tmp_q);
}
/*****************************************************************************/
/* START: Rnode SM */
/*****************************************************************************/
/*
* csio_rns_uninit - SM handler for the rnode 'uninit' state.
* @rn - rnode
* @evt - SM event.
*/
static void
csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
int ret = 0;
CSIO_INC_STATS(rn, n_evt_sm[evt]);
switch (evt) {
case CSIO_RNFE_LOGGED_IN:
case CSIO_RNFE_PLOGI_RECV:
ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
if (!ret) {
csio_set_state(&rn->sm, csio_rns_ready);
__csio_reg_rnode(rn);
} else {
CSIO_INC_STATS(rn, n_err_inval);
}
break;
case CSIO_RNFE_LOGO_RECV:
csio_ln_dbg(ln,
"ssni:x%x Ignoring event %d recv "
"in rn state[uninit]\n", csio_rn_flowid(rn), evt);
CSIO_INC_STATS(rn, n_evt_drop);
break;
default:
csio_ln_dbg(ln,
"ssni:x%x unexp event %d recv "
"in rn state[uninit]\n", csio_rn_flowid(rn), evt);
CSIO_INC_STATS(rn, n_evt_unexp);
break;
}
}
/*
* csio_rns_ready - SM handler for the rnode 'ready' state.
* @rn - rnode
* @evt - SM event.
*/
static void
csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
int ret = 0;
CSIO_INC_STATS(rn, n_evt_sm[evt]);
switch (evt) {
case CSIO_RNFE_LOGGED_IN:
case CSIO_RNFE_PLOGI_RECV:
csio_ln_dbg(ln,
"ssni:x%x Ignoring event %d recv from did:x%x "
"in rn state[ready]\n", csio_rn_flowid(rn), evt,
rn->nport_id);
CSIO_INC_STATS(rn, n_evt_drop);
break;
case CSIO_RNFE_PRLI_DONE:
case CSIO_RNFE_PRLI_RECV:
ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
if (!ret)
__csio_reg_rnode(rn);
else
CSIO_INC_STATS(rn, n_err_inval);
break;
case CSIO_RNFE_DOWN:
csio_set_state(&rn->sm, csio_rns_offline);
__csio_unreg_rnode(rn);
/* FW is expected to internally abort outstanding SCSI WRs
* and return all SCSI WRs to the host with status "ABORTED".
*/
break;
case CSIO_RNFE_LOGO_RECV:
csio_set_state(&rn->sm, csio_rns_offline);
__csio_unreg_rnode(rn);
/* FW is expected to internally abort outstanding SCSI WRs
* and return all SCSI WRs to the host with status "ABORTED".
*/
break;
case CSIO_RNFE_CLOSE:
/*
* Each rnode receives a CLOSE event when the driver is removed
* or the device is reset.
* Note: All outstanding I/Os on the remote port need to be
* returned to the upper layer with an appropriate error before
* sending the CLOSE event.
*/
csio_set_state(&rn->sm, csio_rns_uninit);
__csio_unreg_rnode(rn);
break;
case CSIO_RNFE_NAME_MISSING:
csio_set_state(&rn->sm, csio_rns_disappeared);
__csio_unreg_rnode(rn);
/*
* FW is expected to internally abort outstanding SCSI WRs
* and return all SCSI WRs to the host with status "ABORTED".
*/
break;
default:
csio_ln_dbg(ln,
"ssni:x%x unexp event %d recv from did:x%x "
"in rn state[uninit]\n", csio_rn_flowid(rn), evt,
rn->nport_id);
CSIO_INC_STATS(rn, n_evt_unexp);
break;
}
}
/*
* csio_rns_offline - SM handler for the rnode 'offline' state.
* @rn - rnode
* @evt - SM event.
*/
static void
csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
int ret = 0;
CSIO_INC_STATS(rn, n_evt_sm[evt]);
switch (evt) {
case CSIO_RNFE_LOGGED_IN:
case CSIO_RNFE_PLOGI_RECV:
ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
if (!ret) {
csio_set_state(&rn->sm, csio_rns_ready);
__csio_reg_rnode(rn);
} else {
CSIO_INC_STATS(rn, n_err_inval);
csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
}
break;
case CSIO_RNFE_DOWN:
csio_ln_dbg(ln,
"ssni:x%x Ignoring event %d recv from did:x%x "
"in rn state[offline]\n", csio_rn_flowid(rn), evt,
rn->nport_id);
CSIO_INC_STATS(rn, n_evt_drop);
break;
case CSIO_RNFE_CLOSE:
/* Each rnode receives a CLOSE event when the driver is removed
* or the device is reset.
* Note: All outstanding I/Os on the remote port need to be
* returned to the upper layer with an appropriate error before
* sending the CLOSE event.
*/
csio_set_state(&rn->sm, csio_rns_uninit);
break;
case CSIO_RNFE_NAME_MISSING:
csio_set_state(&rn->sm, csio_rns_disappeared);
break;
default:
csio_ln_dbg(ln,
"ssni:x%x unexp event %d recv from did:x%x "
"in rn state[offline]\n", csio_rn_flowid(rn), evt,
rn->nport_id);
CSIO_INC_STATS(rn, n_evt_unexp);
break;
}
}
/*
* csio_rns_disappeared - SM handler for the rnode 'disappeared' state.
* @rn - rnode
* @evt - SM event.
*/
static void
csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
int ret = 0;
CSIO_INC_STATS(rn, n_evt_sm[evt]);
switch (evt) {
case CSIO_RNFE_LOGGED_IN:
case CSIO_RNFE_PLOGI_RECV:
ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
if (!ret) {
csio_set_state(&rn->sm, csio_rns_ready);
__csio_reg_rnode(rn);
} else {
CSIO_INC_STATS(rn, n_err_inval);
csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
}
break;
case CSIO_RNFE_CLOSE:
/* Each rnode receives a CLOSE event when the driver is removed
* or the device is reset.
* Note: All outstanding I/Os on the remote port need to be
* returned to the upper layer with an appropriate error before
* sending the CLOSE event.
*/
csio_set_state(&rn->sm, csio_rns_uninit);
break;
case CSIO_RNFE_DOWN:
case CSIO_RNFE_NAME_MISSING:
csio_ln_dbg(ln,
"ssni:x%x Ignoring event %d recv from did x%x"
"in rn state[disappeared]\n", csio_rn_flowid(rn),
evt, rn->nport_id);
break;
default:
csio_ln_dbg(ln,
"ssni:x%x unexp event %d recv from did x%x"
"in rn state[disappeared]\n", csio_rn_flowid(rn),
evt, rn->nport_id);
CSIO_INC_STATS(rn, n_evt_unexp);
break;
}
}
/*****************************************************************************/
/* END: Rnode SM */
/*****************************************************************************/
/*
* csio_rnode_devloss_handler - Device loss event handler
* @rn: rnode
*
* Post event to close rnode SM and free rnode.
*/
void
csio_rnode_devloss_handler(struct csio_rnode *rn)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
/* ignore if same rnode came back as online */
if (csio_is_rnode_ready(rn))
return;
csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
/* Free rn if in uninit state */
if (csio_is_rnode_uninit(rn))
csio_put_rnode(ln, rn);
}
/**
* csio_rnode_fwevt_handler - Event handler for firmware rnode events.
* @rn: rnode
* @fwevt: firmware event to be handled.
*/
void
csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt)
{
struct csio_lnode *ln = csio_rnode_to_lnode(rn);
enum csio_rn_ev evt;
evt = CSIO_FWE_TO_RNFE(fwevt);
if (!evt) {
csio_ln_err(ln, "ssni:x%x Unhandled FW Rdev event: %d\n",
csio_rn_flowid(rn), fwevt);
CSIO_INC_STATS(rn, n_evt_unexp);
return;
}
CSIO_INC_STATS(rn, n_evt_fw[fwevt]);
/* Track previous & current events for debugging */
rn->prev_evt = rn->cur_evt;
rn->cur_evt = fwevt;
/* Post event to rnode SM */
csio_post_event(&rn->sm, evt);
/* Free rn if in uninit state */
if (csio_is_rnode_uninit(rn))
csio_put_rnode(ln, rn);
}
/*
* csio_rnode_init - Initialize rnode.
* @rn: RNode
* @ln: Associated lnode
*
* Caller is responsible for holding the lock. The lock is required
* to be held for inserting the rnode in ln->rnhead list.
*/
static int
csio_rnode_init(struct csio_rnode *rn, struct csio_lnode *ln)
{
csio_rnode_to_lnode(rn) = ln;
csio_init_state(&rn->sm, csio_rns_uninit);
INIT_LIST_HEAD(&rn->host_cmpl_q);
csio_rn_flowid(rn) = CSIO_INVALID_IDX;
/* Add rnode to list of lnodes->rnhead */
list_add_tail(&rn->sm.sm_list, &ln->rnhead);
return 0;
}
static void
csio_rnode_exit(struct csio_rnode *rn)
{
list_del_init(&rn->sm.sm_list);
CSIO_DB_ASSERT(list_empty(&rn->host_cmpl_q));
}

View File

@ -0,0 +1,141 @@
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_RNODE_H__
#define __CSIO_RNODE_H__
#include "csio_defs.h"
/* State machine events */
enum csio_rn_ev {
CSIO_RNFE_NONE = (uint32_t)0, /* None */
CSIO_RNFE_LOGGED_IN, /* [N/F]Port login
* complete.
*/
CSIO_RNFE_PRLI_DONE, /* PRLI completed */
CSIO_RNFE_PLOGI_RECV, /* Received PLOGI */
CSIO_RNFE_PRLI_RECV, /* Received PRLI */
CSIO_RNFE_LOGO_RECV, /* Received LOGO */
CSIO_RNFE_PRLO_RECV, /* Received PRLO */
CSIO_RNFE_DOWN, /* Rnode is down */
CSIO_RNFE_CLOSE, /* Close rnode */
CSIO_RNFE_NAME_MISSING, /* Rnode name missing
* in name server.
*/
CSIO_RNFE_MAX_EVENT,
};
/* rnode stats */
struct csio_rnode_stats {
uint32_t n_err; /* error */
uint32_t n_err_inval; /* invalid parameter */
uint32_t n_err_nomem; /* error nomem */
uint32_t n_evt_unexp; /* unexpected event */
uint32_t n_evt_drop; /* dropped event */
uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */
enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */
uint32_t n_lun_rst; /* Number of resets of
* LUNs under this
* target.
*/
uint32_t n_lun_rst_fail; /* Number of LUN reset
* failures.
*/
uint32_t n_tgt_rst; /* Number of target resets */
uint32_t n_tgt_rst_fail; /* Number of target reset
* failures.
*/
};
/* Defines for rnode role */
#define CSIO_RNFR_INITIATOR 0x1
#define CSIO_RNFR_TARGET 0x2
#define CSIO_RNFR_FABRIC 0x4
#define CSIO_RNFR_NS 0x8
#define CSIO_RNFR_NPORT 0x10
struct csio_rnode {
struct csio_sm sm; /* State machine -
* should be the
* 1st member
*/
struct csio_lnode *lnp; /* Pointer to owning
* Lnode */
uint32_t flowid; /* Firmware ID */
struct list_head host_cmpl_q; /* SCSI I/Os pending
* completion to the
* mid-layer.
*/
/* FC identifiers for remote node */
uint32_t nport_id;
uint16_t fcp_flags; /* FCP Flags */
uint8_t cur_evt; /* Current event */
uint8_t prev_evt; /* Previous event */
uint32_t role; /* Fabric/Target/
* Initiator/NS
*/
struct fcoe_rdev_entry *rdev_entry; /* Rdev entry */
struct csio_service_parms rn_sparm;
/* FC transport attributes */
struct fc_rport *rport; /* FC transport rport */
uint32_t supp_classes; /* Supported FC classes */
uint32_t maxframe_size; /* Max Frame size */
uint32_t scsi_id; /* Transport given SCSI id */
struct csio_rnode_stats stats; /* Common rnode stats */
};
#define csio_rn_flowid(rn) ((rn)->flowid)
#define csio_rn_wwpn(rn) ((rn)->rn_sparm.wwpn)
#define csio_rn_wwnn(rn) ((rn)->rn_sparm.wwnn)
#define csio_rnode_to_lnode(rn) ((rn)->lnp)
int csio_is_rnode_ready(struct csio_rnode *rn);
void csio_rnode_state_to_str(struct csio_rnode *rn, int8_t *str);
struct csio_rnode *csio_rnode_lookup_portid(struct csio_lnode *, uint32_t);
struct csio_rnode *csio_confirm_rnode(struct csio_lnode *,
uint32_t, struct fcoe_rdev_entry *);
void csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt);
void csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn);
void csio_reg_rnode(struct csio_rnode *);
void csio_unreg_rnode(struct csio_rnode *);
void csio_rnode_devloss_handler(struct csio_rnode *);
#endif /* ifndef __CSIO_RNODE_H__ */

File diff suppressed because it is too large

View File

@ -0,0 +1,342 @@
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_SCSI_H__
#define __CSIO_SCSI_H__
#include <linux/spinlock_types.h>
#include <linux/completion.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fcp.h>
#include "csio_defs.h"
#include "csio_wr.h"
extern struct scsi_host_template csio_fcoe_shost_template;
extern struct scsi_host_template csio_fcoe_shost_vport_template;
extern int csio_scsi_eqsize;
extern int csio_scsi_iqlen;
extern int csio_scsi_ioreqs;
extern uint32_t csio_max_scan_tmo;
extern uint32_t csio_delta_scan_tmo;
extern int csio_lun_qdepth;
/*
**************************** NOTE *******************************
* How do we calculate MAX FCoE SCSI SGEs? Here is the math:
* Max Egress WR size = 512 bytes
* One SCSI egress WR has the following fixed no of bytes:
* 48 (sizeof(struct fw_scsi_write[read]_wr)) - FW WR
* + 32 (sizeof(struct fc_fcp_cmnd)) - Immediate FCP_CMD
* ------
* 80
* ------
* That leaves us with 512 - 80 = 432 bytes for the data SGEs. The
* struct ulptx_sgl header for the SGEs consumes:
* - 4 bytes for cmnd_sge.
* - 12 bytes for the first SGL.
* That leaves us with 416 bytes for the remaining SGE pairs, which
* is 416 / 24 (sizeof(struct ulptx_sge_pair)) = 17 SGE pairs,
* or 34 SGEs. Adding the first SGE gives us 35 SGEs.
*/
#define CSIO_SCSI_MAX_SGE 35
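/*
* Sketch (not part of this commit): a compile-time cross-check of the
* math above, assuming BUILD_BUG_ON is visible here. 512 bytes - 80
* fixed bytes - 4 (cmnd_sge) - 12 (first SGL) = 416 bytes, i.e. 17
* SGE pairs (34 SGEs) plus the first SGE = 35.
*/
static inline void csio_check_max_sge(void)
{
	BUILD_BUG_ON(CSIO_SCSI_MAX_SGE !=
		     1 + 2 * ((512 - 80 - 4 - 12) / 24));
}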
#define CSIO_SCSI_ABRT_TMO_MS 60000
#define CSIO_SCSI_LUNRST_TMO_MS 60000
#define CSIO_SCSI_TM_POLL_MS 2000 /* should be less than
* all TM timeouts.
*/
#define CSIO_SCSI_IQ_WRSZ 128
#define CSIO_SCSI_IQSIZE (csio_scsi_iqlen * CSIO_SCSI_IQ_WRSZ)
#define CSIO_MAX_SNS_LEN 128
#define CSIO_SCSI_RSP_LEN (FCP_RESP_WITH_EXT + 4 + CSIO_MAX_SNS_LEN)
/* Reference to scsi_cmnd */
#define csio_scsi_cmnd(req) ((req)->scratch1)
struct csio_scsi_stats {
uint64_t n_tot_success; /* Total number of good I/Os */
uint32_t n_rn_nr_error; /* No. of remote-node-not-
* ready errors
*/
uint32_t n_hw_nr_error; /* No. of hw-module-not-
* ready errors
*/
uint32_t n_dmamap_error; /* No. of DMA map errors */
uint32_t n_unsupp_sge_error; /* No. of too-many-SGes
* errors.
*/
uint32_t n_no_req_error; /* No. of Out-of-ioreqs error */
uint32_t n_busy_error; /* No. of -EBUSY errors */
uint32_t n_hosterror; /* No. of FW_HOSTERROR I/O */
uint32_t n_rsperror; /* No. of response errors */
uint32_t n_autosense; /* No. of auto sense replies */
uint32_t n_ovflerror; /* No. of overflow errors */
uint32_t n_unflerror; /* No. of underflow errors */
uint32_t n_rdev_nr_error;/* No. of rdev not
* ready errors
*/
uint32_t n_rdev_lost_error;/* No. of rdev lost errors */
uint32_t n_rdev_logo_error;/* No. of rdev logo errors */
uint32_t n_link_down_error;/* No. of link down errors */
uint32_t n_no_xchg_error; /* No. of no-exchange errors */
uint32_t n_unknown_error;/* No. of unhandled errors */
uint32_t n_aborted; /* No. of aborted I/Os */
uint32_t n_abrt_timedout; /* No. of abort timedouts */
uint32_t n_abrt_fail; /* No. of abort failures */
uint32_t n_abrt_dups; /* No. of duplicate aborts */
uint32_t n_abrt_race_comp; /* No. of aborts that raced
* with completions.
*/
uint32_t n_abrt_busy_error;/* No. of abort failures
* due to -EBUSY.
*/
uint32_t n_closed; /* No. of closed I/Os */
uint32_t n_cls_busy_error; /* No. of close failures
* due to -EBUSY.
*/
uint32_t n_active; /* No. of IOs in active_q */
uint32_t n_tm_active; /* No. of TMs in active_q */
uint32_t n_wcbfn; /* No. of I/Os in worker
* cbfn q
*/
uint32_t n_free_ioreq; /* No. of freelist entries */
uint32_t n_free_ddp; /* No. of DDP freelist */
uint32_t n_unaligned; /* No. of Unaligned SGls */
uint32_t n_inval_cplop; /* No. invalid CPL op's in IQ */
uint32_t n_inval_scsiop; /* No. invalid scsi op's in IQ*/
};
struct csio_scsim {
struct csio_hw *hw; /* Pointer to HW module */
uint8_t max_sge; /* Max SGE */
uint8_t proto_cmd_len; /* Proto specific SCSI
* cmd length
*/
uint16_t proto_rsp_len; /* Proto specific SCSI
* response length
*/
spinlock_t freelist_lock; /* Lock for ioreq freelist */
struct list_head active_q; /* Outstanding SCSI I/Os */
struct list_head ioreq_freelist; /* Free list of ioreq's */
struct list_head ddp_freelist; /* DDP descriptor freelist */
struct csio_scsi_stats stats; /* This module's statistics */
};
/* State machine defines */
enum csio_scsi_ev {
CSIO_SCSIE_START_IO = 1, /* Start a regular SCSI IO */
CSIO_SCSIE_START_TM, /* Start a TM IO */
CSIO_SCSIE_COMPLETED, /* IO Completed */
CSIO_SCSIE_ABORT, /* Abort IO */
CSIO_SCSIE_ABORTED, /* IO Aborted */
CSIO_SCSIE_CLOSE, /* Close exchange */
CSIO_SCSIE_CLOSED, /* Exchange closed */
CSIO_SCSIE_DRVCLEANUP, /* Driver wants to manually
* cleanup this I/O.
*/
};
enum csio_scsi_lev {
CSIO_LEV_ALL = 1,
CSIO_LEV_LNODE,
CSIO_LEV_RNODE,
CSIO_LEV_LUN,
};
struct csio_scsi_level_data {
enum csio_scsi_lev level;
struct csio_rnode *rnode;
struct csio_lnode *lnode;
uint64_t oslun;
};
static inline struct csio_ioreq *
csio_get_scsi_ioreq(struct csio_scsim *scm)
{
struct csio_sm *req;
if (likely(!list_empty(&scm->ioreq_freelist))) {
req = list_first_entry(&scm->ioreq_freelist,
struct csio_sm, sm_list);
list_del_init(&req->sm_list);
CSIO_DEC_STATS(scm, n_free_ioreq);
return (struct csio_ioreq *)req;
} else
return NULL;
}
static inline void
csio_put_scsi_ioreq(struct csio_scsim *scm, struct csio_ioreq *ioreq)
{
list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
CSIO_INC_STATS(scm, n_free_ioreq);
}
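/*
* Illustrative sketch (not part of this commit): callers that do not
* already hold freelist_lock wrap the helpers above with it, in the
* spirit of the csio_put_scsi_ioreq_lock() used on the ISR path.
* csio_example_get_ioreq() is a hypothetical name.
*/
static inline struct csio_ioreq *
csio_example_get_ioreq(struct csio_scsim *scm)
{
	struct csio_ioreq *ioreq;
	unsigned long flags;

	spin_lock_irqsave(&scm->freelist_lock, flags);
	ioreq = csio_get_scsi_ioreq(scm);
	spin_unlock_irqrestore(&scm->freelist_lock, flags);
	return ioreq;
}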
static inline void
csio_put_scsi_ioreq_list(struct csio_scsim *scm, struct list_head *reqlist,
int n)
{
list_splice_init(reqlist, &scm->ioreq_freelist);
scm->stats.n_free_ioreq += n;
}
static inline struct csio_dma_buf *
csio_get_scsi_ddp(struct csio_scsim *scm)
{
struct csio_dma_buf *ddp;
if (likely(!list_empty(&scm->ddp_freelist))) {
ddp = list_first_entry(&scm->ddp_freelist,
struct csio_dma_buf, list);
list_del_init(&ddp->list);
CSIO_DEC_STATS(scm, n_free_ddp);
return ddp;
} else
return NULL;
}
static inline void
csio_put_scsi_ddp(struct csio_scsim *scm, struct csio_dma_buf *ddp)
{
list_add_tail(&ddp->list, &scm->ddp_freelist);
CSIO_INC_STATS(scm, n_free_ddp);
}
static inline void
csio_put_scsi_ddp_list(struct csio_scsim *scm, struct list_head *reqlist,
int n)
{
list_splice_tail_init(reqlist, &scm->ddp_freelist);
scm->stats.n_free_ddp += n;
}
static inline void
csio_scsi_completed(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_COMPLETED);
if (csio_list_deleted(&ioreq->sm.sm_list))
list_add_tail(&ioreq->sm.sm_list, cbfn_q);
}
static inline void
csio_scsi_aborted(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORTED);
list_add_tail(&ioreq->sm.sm_list, cbfn_q);
}
static inline void
csio_scsi_closed(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSED);
list_add_tail(&ioreq->sm.sm_list, cbfn_q);
}
static inline void
csio_scsi_drvcleanup(struct csio_ioreq *ioreq)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_DRVCLEANUP);
}
/*
 * csio_scsi_start_io - Kick-start the I/O SM.
 * @ioreq: I/O request SM.
 *
 * Should be called with the lock held.
 */
static inline int
csio_scsi_start_io(struct csio_ioreq *ioreq)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_START_IO);
return ioreq->drv_status;
}
/*
 * csio_scsi_start_tm - Kick off the task management I/O SM.
 * @ioreq: I/O request SM.
 *
 * Should be called with the lock held.
 */
static inline int
csio_scsi_start_tm(struct csio_ioreq *ioreq)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_START_TM);
return ioreq->drv_status;
}
/*
 * csio_scsi_abort - Abort an I/O request.
 * @ioreq: I/O request SM.
 *
 * Should be called with the lock held.
 */
static inline int
csio_scsi_abort(struct csio_ioreq *ioreq)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORT);
return ioreq->drv_status;
}
/*
 * csio_scsi_close - Close an I/O request.
 * @ioreq: I/O request SM.
 *
 * Should be called with the lock held.
 */
static inline int
csio_scsi_close(struct csio_ioreq *ioreq)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSE);
return ioreq->drv_status;
}
void csio_scsi_cleanup_io_q(struct csio_scsim *, struct list_head *);
int csio_scsim_cleanup_io(struct csio_scsim *, bool abort);
int csio_scsim_cleanup_io_lnode(struct csio_scsim *,
struct csio_lnode *);
struct csio_ioreq *csio_scsi_cmpl_handler(struct csio_hw *, void *, uint32_t,
struct csio_fl_dma_buf *,
void *, uint8_t **);
int csio_scsi_qconfig(struct csio_hw *);
int csio_scsim_init(struct csio_scsim *, struct csio_hw *);
void csio_scsim_exit(struct csio_scsim *);
#endif /* __CSIO_SCSI_H__ */

File diff suppressed because it is too large

View File

@ -0,0 +1,512 @@
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_WR_H__
#define __CSIO_WR_H__
#include <linux/cache.h>
#include "csio_defs.h"
#include "t4fw_api.h"
#include "t4fw_api_stor.h"
/*
* SGE register field values.
*/
#define X_INGPCIEBOUNDARY_32B 0
#define X_INGPCIEBOUNDARY_64B 1
#define X_INGPCIEBOUNDARY_128B 2
#define X_INGPCIEBOUNDARY_256B 3
#define X_INGPCIEBOUNDARY_512B 4
#define X_INGPCIEBOUNDARY_1024B 5
#define X_INGPCIEBOUNDARY_2048B 6
#define X_INGPCIEBOUNDARY_4096B 7
/* GTS register */
#define X_TIMERREG_COUNTER0 0
#define X_TIMERREG_COUNTER1 1
#define X_TIMERREG_COUNTER2 2
#define X_TIMERREG_COUNTER3 3
#define X_TIMERREG_COUNTER4 4
#define X_TIMERREG_COUNTER5 5
#define X_TIMERREG_RESTART_COUNTER 6
#define X_TIMERREG_UPDATE_CIDX 7
/*
* Egress Context field values
*/
#define X_FETCHBURSTMIN_16B 0
#define X_FETCHBURSTMIN_32B 1
#define X_FETCHBURSTMIN_64B 2
#define X_FETCHBURSTMIN_128B 3
#define X_FETCHBURSTMAX_64B 0
#define X_FETCHBURSTMAX_128B 1
#define X_FETCHBURSTMAX_256B 2
#define X_FETCHBURSTMAX_512B 3
#define X_HOSTFCMODE_NONE 0
#define X_HOSTFCMODE_INGRESS_QUEUE 1
#define X_HOSTFCMODE_STATUS_PAGE 2
#define X_HOSTFCMODE_BOTH 3
/*
* Ingress Context field values
*/
#define X_UPDATESCHEDULING_TIMER 0
#define X_UPDATESCHEDULING_COUNTER_OPTTIMER 1
#define X_UPDATEDELIVERY_NONE 0
#define X_UPDATEDELIVERY_INTERRUPT 1
#define X_UPDATEDELIVERY_STATUS_PAGE 2
#define X_UPDATEDELIVERY_BOTH 3
#define X_INTERRUPTDESTINATION_PCIE 0
#define X_INTERRUPTDESTINATION_IQ 1
#define X_RSPD_TYPE_FLBUF 0
#define X_RSPD_TYPE_CPL 1
#define X_RSPD_TYPE_INTR 2
/* WR status is at the same position as retval in a CMD header */
#define csio_wr_status(_wr) \
(FW_CMD_RETVAL_GET(ntohl(((struct fw_cmd_hdr *)(_wr))->lo)))
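/*
 * Usage sketch (not part of the original header): mapping a completed
 * WR back to the fw_retval codes from t4fw_api_stor.h. 'wr' is a
 * hypothetical pointer to the completed work request.
 */
static inline int
csio_wr_completed_ok(void *wr)
{
	return csio_wr_status(wr) == FW_SUCCESS;
}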
struct csio_hw;
extern int csio_intr_coalesce_cnt;
extern int csio_intr_coalesce_time;
/* Ingress queue params */
struct csio_iq_params {
uint8_t iq_start:1;
uint8_t iq_stop:1;
uint8_t pfn:3;
uint8_t vfn;
uint16_t physiqid;
uint16_t iqid;
uint16_t fl0id;
uint16_t fl1id;
uint8_t viid;
uint8_t type;
uint8_t iqasynch;
uint8_t reserved4;
uint8_t iqandst;
uint8_t iqanus;
uint8_t iqanud;
uint16_t iqandstindex;
uint8_t iqdroprss;
uint8_t iqpciech;
uint8_t iqdcaen;
uint8_t iqdcacpu;
uint8_t iqintcntthresh;
uint8_t iqo;
uint8_t iqcprio;
uint8_t iqesize;
uint16_t iqsize;
uint64_t iqaddr;
uint8_t iqflintiqhsen;
uint8_t reserved5;
uint8_t iqflintcongen;
uint8_t iqflintcngchmap;
uint32_t reserved6;
uint8_t fl0hostfcmode;
uint8_t fl0cprio;
uint8_t fl0paden;
uint8_t fl0packen;
uint8_t fl0congen;
uint8_t fl0dcaen;
uint8_t fl0dcacpu;
uint8_t fl0fbmin;
uint8_t fl0fbmax;
uint8_t fl0cidxfthresho;
uint8_t fl0cidxfthresh;
uint16_t fl0size;
uint64_t fl0addr;
uint64_t reserved7;
uint8_t fl1hostfcmode;
uint8_t fl1cprio;
uint8_t fl1paden;
uint8_t fl1packen;
uint8_t fl1congen;
uint8_t fl1dcaen;
uint8_t fl1dcacpu;
uint8_t fl1fbmin;
uint8_t fl1fbmax;
uint8_t fl1cidxfthresho;
uint8_t fl1cidxfthresh;
uint16_t fl1size;
uint64_t fl1addr;
};
/* Egress queue params */
struct csio_eq_params {
uint8_t pfn;
uint8_t vfn;
uint8_t eqstart:1;
uint8_t eqstop:1;
uint16_t physeqid;
uint32_t eqid;
uint8_t hostfcmode:2;
uint8_t cprio:1;
uint8_t pciechn:3;
uint16_t iqid;
uint8_t dcaen:1;
uint8_t dcacpu:5;
uint8_t fbmin:3;
uint8_t fbmax:3;
uint8_t cidxfthresho:1;
uint8_t cidxfthresh:3;
uint16_t eqsize;
uint64_t eqaddr;
};
struct csio_dma_buf {
struct list_head list;
void *vaddr; /* Virtual address */
dma_addr_t paddr; /* Physical address */
uint32_t len; /* Buffer size */
};
/* Generic I/O request structure */
struct csio_ioreq {
struct csio_sm sm; /* SM; must be the first member */
int iq_idx; /* Ingress queue index */
int eq_idx; /* Egress queue index */
uint32_t nsge; /* Number of SG elements */
uint32_t tmo; /* Driver timeout */
uint32_t datadir; /* Data direction */
struct csio_dma_buf dma_buf; /* Req/resp DMA buffers */
uint16_t wr_status; /* WR completion status */
int16_t drv_status; /* Driver internal status */
struct csio_lnode *lnode; /* Owner lnode */
struct csio_rnode *rnode; /* Src/destination rnode */
void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *);
/* completion callback */
void *scratch1; /* Scratch area 1. */
void *scratch2; /* Scratch area 2. */
struct list_head gen_list; /* Any list associated with
* this ioreq.
*/
uint64_t fw_handle; /* Unique handle passed
* to FW
*/
uint8_t dcopy; /* Data copy required */
uint8_t reserved1;
uint16_t reserved2;
struct completion cmplobj; /* ioreq completion object */
} ____cacheline_aligned_in_smp;
/*
* Egress status page for egress cidx updates
*/
struct csio_qstatus_page {
__be32 qid;
__be16 cidx;
__be16 pidx;
};
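/*
 * Read-back sketch (illustrative; 'stp' is a hypothetical pointer to a
 * queue's status page): hardware DMAs consumer-index updates here, so
 * a helper can recover the current hardware cidx.
 */
static inline uint16_t
csio_qstatus_cidx(const struct csio_qstatus_page *stp)
{
	return ntohs(stp->cidx);	/* fields are big-endian */
}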
enum {
CSIO_MAX_FLBUF_PER_IQWR = 4,
CSIO_QCREDIT_SZ = 64, /* pidx/cidx increments
* in bytes
*/
CSIO_MAX_QID = 0xFFFF,
CSIO_MAX_IQ = 128,
CSIO_SGE_NTIMERS = 6,
CSIO_SGE_NCOUNTERS = 4,
CSIO_SGE_FL_SIZE_REGS = 16,
};
/* Defines for type */
enum {
CSIO_EGRESS = 1,
CSIO_INGRESS = 2,
CSIO_FREELIST = 3,
};
/*
* Structure for footer (last 2 flits) of Ingress Queue Entry.
*/
struct csio_iqwr_footer {
__be32 hdrbuflen_pidx;
__be32 pldbuflen_qid;
union {
u8 type_gen;
__be64 last_flit;
} u;
};
#define IQWRF_NEWBUF (1U << 31)
#define IQWRF_LEN_GET(x) (((x) >> 0) & 0x7fffffffU)
#define IQWRF_GEN_SHIFT 7
#define IQWRF_TYPE_GET(x) (((x) >> 4) & 0x3U)
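/*
 * Decoding sketch (assumptions: the new-buffer flag and payload length
 * live in pldbuflen_qid, as the masks above suggest; 'ftr' is a
 * hypothetical pointer to the footer of a received IQ entry).
 */
static inline void
csio_iqwr_ftr_decode(const struct csio_iqwr_footer *ftr,
		     uint8_t *type, uint8_t *newbuf, uint32_t *buflen)
{
	uint32_t len_qid = ntohl(ftr->pldbuflen_qid);

	*type = IQWRF_TYPE_GET(ftr->u.type_gen); /* FLBUF/CPL/INTR */
	*newbuf = !!(len_qid & IQWRF_NEWBUF);    /* fresh FL buffer used */
	*buflen = IQWRF_LEN_GET(len_qid);        /* payload length */
}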
/*
* WR pair:
* ========
* A WR can start towards the end of a queue, and then continue at the
* beginning, since the queue is considered to be circular. This will
* require a pair of address/len to be passed back to the caller -
* hence the Work request pair structure.
*/
struct csio_wr_pair {
void *addr1;
uint32_t size1;
void *addr2;
uint32_t size2;
};
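/*
 * Semantics sketch for the pair (the driver's real copy routine is
 * csio_wr_copy_to_wrp(), declared below; this illustration only shows
 * the intended wrap-around behaviour under that assumption).
 */
static inline void
csio_wrp_copy_sketch(void *src, struct csio_wr_pair *wrp, uint32_t len)
{
	uint32_t part1 = (len < wrp->size1) ? len : wrp->size1;

	memcpy(wrp->addr1, src, part1);		/* tail of the queue */
	if (len > part1)			/* wrapped remainder */
		memcpy(wrp->addr2, (uint8_t *)src + part1, len - part1);
}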
/*
* The following structure is used by ingress processing to return the
* free list buffers to consumers.
*/
struct csio_fl_dma_buf {
struct csio_dma_buf flbufs[CSIO_MAX_FLBUF_PER_IQWR];
/* Freelist DMA buffers */
int offset; /* Offset within the
* first FL buf.
*/
uint32_t totlen; /* Total length */
uint8_t defer_free; /* Freeing of buffer can be deferred */
};
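/*
 * Walking sketch (hedged: assumes 'offset' consumes part of the first
 * buffer and 'totlen' covers the remaining payload; the helper name is
 * hypothetical).
 */
static inline int
csio_flbuf_count(const struct csio_fl_dma_buf *flb)
{
	uint32_t rem = flb->totlen;
	int i;

	for (i = 0; i < CSIO_MAX_FLBUF_PER_IQWR && rem; i++) {
		uint32_t avail = flb->flbufs[i].len -
				 (i == 0 ? flb->offset : 0);
		rem -= (rem < avail) ? rem : avail;
	}
	return i;	/* FL buffers carrying this payload */
}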
/* Data-types */
typedef void (*iq_handler_t)(struct csio_hw *, void *, uint32_t,
struct csio_fl_dma_buf *, void *);
struct csio_iq {
uint16_t iqid; /* Queue ID */
uint16_t physiqid; /* Physical Queue ID */
uint16_t genbit; /* Generation bit,
* initially set to 1
*/
int flq_idx; /* Freelist queue index */
iq_handler_t iq_intx_handler; /* IQ INTx handler routine */
};
struct csio_eq {
uint16_t eqid; /* Qid */
uint16_t physeqid; /* Physical Queue ID */
uint8_t wrap[512]; /* Temp area for q-wrap around*/
};
struct csio_fl {
uint16_t flid; /* Qid */
uint16_t packen; /* Packing enabled? */
int offset; /* Offset within FL buf */
int sreg; /* Size register */
struct csio_dma_buf *bufs; /* Free list buffer ptr array
* indexed using flq->cidx/pidx
*/
};
struct csio_qstats {
uint32_t n_tot_reqs; /* Total no. of Requests */
uint32_t n_tot_rsps; /* Total no. of responses */
uint32_t n_qwrap; /* Queue wraps */
uint32_t n_eq_wr_split; /* Number of split EQ WRs */
uint32_t n_qentry; /* Queue entry */
uint32_t n_qempty; /* Queue empty */
uint32_t n_qfull; /* Queue fulls */
uint32_t n_rsp_unknown; /* Unknown response type */
uint32_t n_stray_comp; /* Stray completion intr */
uint32_t n_flq_refill; /* Number of FL refills */
};
/* Queue metadata */
struct csio_q {
uint16_t type; /* Type: Ingress/Egress/FL */
uint16_t pidx; /* producer index */
uint16_t cidx; /* consumer index */
uint16_t inc_idx; /* Incremental index */
uint32_t wr_sz; /* Size of all WRs in this q
* if fixed
*/
void *vstart; /* Base virtual address
* of queue
*/
void *vwrap; /* Virtual end address to
* wrap around at
*/
uint32_t credits; /* Size of queue in credits */
void *owner; /* Owner */
union { /* Queue contexts */
struct csio_iq iq;
struct csio_eq eq;
struct csio_fl fl;
} un;
dma_addr_t pstart; /* Base physical address of
* queue
*/
uint32_t portid; /* PCIE Channel */
uint32_t size; /* Size of queue in bytes */
struct csio_qstats stats; /* Statistics */
} ____cacheline_aligned_in_smp;
struct csio_sge {
uint32_t csio_fl_align; /* Calculated and cached
* for fast path
*/
uint32_t sge_control; /* padding, boundaries,
* lengths, etc.
*/
uint32_t sge_host_page_size; /* Host page size */
uint32_t sge_fl_buf_size[CSIO_SGE_FL_SIZE_REGS];
/* free list buffer sizes */
uint16_t timer_val[CSIO_SGE_NTIMERS];
uint8_t counter_val[CSIO_SGE_NCOUNTERS];
};
/* Work request module */
struct csio_wrm {
int num_q; /* Number of queues */
struct csio_q **q_arr; /* Array of queue pointers
* allocated dynamically
* based on configured values
*/
uint32_t fw_iq_start; /* Start ID of IQ for this fn*/
uint32_t fw_eq_start; /* Start ID of EQ for this fn*/
struct csio_q *intr_map[CSIO_MAX_IQ];
/* IQ-id to IQ map table. */
int free_qidx; /* queue idx of free queue */
struct csio_sge sge; /* SGE params */
};
#define csio_get_q(__hw, __idx) ((__hw)->wrm.q_arr[__idx])
#define csio_q_type(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->type)
#define csio_q_pidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pidx)
#define csio_q_cidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->cidx)
#define csio_q_inc_idx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->inc_idx)
#define csio_q_vstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->vstart)
#define csio_q_pstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pstart)
#define csio_q_size(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->size)
#define csio_q_credits(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->credits)
#define csio_q_portid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->portid)
#define csio_q_wr_sz(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->wr_sz)
#define csio_q_iqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.iq.iqid)
#define csio_q_physiqid(__hw, __idx) \
((__hw)->wrm.q_arr[(__idx)]->un.iq.physiqid)
#define csio_q_iq_flq_idx(__hw, __idx) \
((__hw)->wrm.q_arr[(__idx)]->un.iq.flq_idx)
#define csio_q_eqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.eqid)
#define csio_q_flid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.fl.flid)
#define csio_q_physeqid(__hw, __idx) \
((__hw)->wrm.q_arr[(__idx)]->un.eq.physeqid)
#define csio_iq_has_fl(__iq) ((__iq)->un.iq.flq_idx != -1)
#define csio_q_iq_to_flid(__hw, __iq_idx) \
 csio_q_flid((__hw), (__hw)->wrm.q_arr[(__iq_idx)]->un.iq.flq_idx)
#define csio_q_set_intr_map(__hw, __iq_idx, __rel_iq_id) \
(__hw)->wrm.intr_map[__rel_iq_id] = csio_get_q(__hw, __iq_idx)
#define csio_q_eq_wrap(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.wrap)
struct csio_mb;
int csio_wr_alloc_q(struct csio_hw *, uint32_t, uint32_t,
uint16_t, void *, uint32_t, int, iq_handler_t);
int csio_wr_iq_create(struct csio_hw *, void *, int,
uint32_t, uint8_t, bool,
void (*)(struct csio_hw *, struct csio_mb *));
int csio_wr_eq_create(struct csio_hw *, void *, int, int, uint8_t,
void (*)(struct csio_hw *, struct csio_mb *));
int csio_wr_destroy_queues(struct csio_hw *, bool cmd);
int csio_wr_get(struct csio_hw *, int, uint32_t,
struct csio_wr_pair *);
void csio_wr_copy_to_wrp(void *, struct csio_wr_pair *, uint32_t, uint32_t);
int csio_wr_issue(struct csio_hw *, int, bool);
int csio_wr_process_iq(struct csio_hw *, struct csio_q *,
void (*)(struct csio_hw *, void *,
uint32_t, struct csio_fl_dma_buf *,
void *),
void *);
int csio_wr_process_iq_idx(struct csio_hw *, int,
void (*)(struct csio_hw *, void *,
uint32_t, struct csio_fl_dma_buf *,
void *),
void *);
void csio_wr_sge_init(struct csio_hw *);
int csio_wrm_init(struct csio_wrm *, struct csio_hw *);
void csio_wrm_exit(struct csio_wrm *, struct csio_hw *);
#endif /* ifndef __CSIO_WR_H__ */

View File

@ -0,0 +1,578 @@
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _T4FW_API_STOR_H_
#define _T4FW_API_STOR_H_
/******************************************************************************
* R E T U R N V A L U E S
********************************/
enum fw_retval {
FW_SUCCESS = 0, /* completed successfully */
FW_EPERM = 1, /* operation not permitted */
FW_ENOENT = 2, /* no such file or directory */
FW_EIO = 5, /* input/output error; hw bad */
FW_ENOEXEC = 8, /* exec format error; inv microcode */
FW_EAGAIN = 11, /* try again */
FW_ENOMEM = 12, /* out of memory */
FW_EFAULT = 14, /* bad address; fw bad */
FW_EBUSY = 16, /* resource busy */
FW_EEXIST = 17, /* file exists */
FW_EINVAL = 22, /* invalid argument */
FW_ENOSPC = 28, /* no space left on device */
FW_ENOSYS = 38, /* functionality not implemented */
FW_EPROTO = 71, /* protocol error */
FW_EADDRINUSE = 98, /* address already in use */
FW_EADDRNOTAVAIL = 99, /* cannot assign requested address */
FW_ENETDOWN = 100, /* network is down */
FW_ENETUNREACH = 101, /* network is unreachable */
FW_ENOBUFS = 105, /* no buffer space available */
FW_ETIMEDOUT = 110, /* timeout */
FW_EINPROGRESS = 115, /* fw internal */
FW_SCSI_ABORT_REQUESTED = 128, /* */
FW_SCSI_ABORT_TIMEDOUT = 129, /* */
FW_SCSI_ABORTED = 130, /* */
FW_SCSI_CLOSE_REQUESTED = 131, /* */
FW_ERR_LINK_DOWN = 132, /* */
FW_RDEV_NOT_READY = 133, /* */
FW_ERR_RDEV_LOST = 134, /* */
FW_ERR_RDEV_LOGO = 135, /* */
FW_FCOE_NO_XCHG = 136, /* */
FW_SCSI_RSP_ERR = 137, /* */
FW_ERR_RDEV_IMPL_LOGO = 138, /* */
FW_SCSI_UNDER_FLOW_ERR = 139, /* */
FW_SCSI_OVER_FLOW_ERR = 140, /* */
FW_SCSI_DDP_ERR = 141, /* DDP error*/
FW_SCSI_TASK_ERR = 142, /* No SCSI tasks available */
};
enum fw_fcoe_link_sub_op {
FCOE_LINK_DOWN = 0x0,
FCOE_LINK_UP = 0x1,
FCOE_LINK_COND = 0x2,
};
enum fw_fcoe_link_status {
FCOE_LINKDOWN = 0x0,
FCOE_LINKUP = 0x1,
};
enum fw_ofld_prot {
PROT_FCOE = 0x1,
PROT_ISCSI = 0x2,
};
enum rport_type_fcoe {
FLOGI_VFPORT = 0x1, /* 0xfffffe */
FDISC_VFPORT = 0x2, /* 0xfffffe */
NS_VNPORT = 0x3, /* 0xfffffc */
REG_FC4_VNPORT = 0x4, /* any FC4 type VN_PORT */
REG_VNPORT = 0x5, /* 0xfffxxx - non FC4 port in switch */
FDMI_VNPORT = 0x6, /* 0xfffffa */
FAB_CTLR_VNPORT = 0x7, /* 0xfffffd */
};
enum event_cause_fcoe {
PLOGI_ACC_RCVD = 0x01,
PLOGI_RJT_RCVD = 0x02,
PLOGI_RCVD = 0x03,
PLOGO_RCVD = 0x04,
PRLI_ACC_RCVD = 0x05,
PRLI_RJT_RCVD = 0x06,
PRLI_RCVD = 0x07,
PRLO_RCVD = 0x08,
NPORT_ID_CHGD = 0x09,
FLOGO_RCVD = 0x0a,
CLR_VIRT_LNK_RCVD = 0x0b,
FLOGI_ACC_RCVD = 0x0c,
FLOGI_RJT_RCVD = 0x0d,
FDISC_ACC_RCVD = 0x0e,
FDISC_RJT_RCVD = 0x0f,
FLOGI_TMO_MAX_RETRY = 0x10,
IMPL_LOGO_ADISC_ACC = 0x11,
IMPL_LOGO_ADISC_RJT = 0x12,
IMPL_LOGO_ADISC_CNFLT = 0x13,
PRLI_TMO = 0x14,
ADISC_TMO = 0x15,
RSCN_DEV_LOST = 0x16,
SCR_ACC_RCVD = 0x17,
ADISC_RJT_RCVD = 0x18,
LOGO_SNT = 0x19,
PROTO_ERR_IMPL_LOGO = 0x1a,
};
enum fcoe_cmn_type {
FCOE_ELS,
FCOE_CT,
FCOE_SCSI_CMD,
FCOE_UNSOL_ELS,
};
enum fw_wr_stor_opcodes {
FW_RDEV_WR = 0x38,
FW_FCOE_ELS_CT_WR = 0x30,
FW_SCSI_WRITE_WR = 0x31,
FW_SCSI_READ_WR = 0x32,
FW_SCSI_CMD_WR = 0x33,
FW_SCSI_ABRT_CLS_WR = 0x34,
};
struct fw_rdev_wr {
__be32 op_to_immdlen;
__be32 alloc_to_len16;
__be64 cookie;
u8 protocol;
u8 event_cause;
u8 cur_state;
u8 prev_state;
__be32 flags_to_assoc_flowid;
union rdev_entry {
struct fcoe_rdev_entry {
__be32 flowid;
u8 protocol;
u8 event_cause;
u8 flags;
u8 rjt_reason;
u8 cur_login_st;
u8 prev_login_st;
__be16 rcv_fr_sz;
u8 rd_xfer_rdy_to_rport_type;
u8 vft_to_qos;
u8 org_proc_assoc_to_acc_rsp_code;
u8 enh_disc_to_tgt;
u8 wwnn[8];
u8 wwpn[8];
__be16 iqid;
u8 fc_oui[3];
u8 r_id[3];
} fcoe_rdev;
struct iscsi_rdev_entry {
__be32 flowid;
u8 protocol;
u8 event_cause;
u8 flags;
u8 r3;
__be16 iscsi_opts;
__be16 tcp_opts;
__be16 ip_opts;
__be16 max_rcv_len;
__be16 max_snd_len;
__be16 first_brst_len;
__be16 max_brst_len;
__be16 r4;
__be16 def_time2wait;
__be16 def_time2ret;
__be16 nop_out_intrvl;
__be16 non_scsi_to;
__be16 isid;
__be16 tsid;
__be16 port;
__be16 tpgt;
u8 r5[6];
__be16 iqid;
} iscsi_rdev;
} u;
};
#define FW_RDEV_WR_FLOWID_GET(x) (((x) >> 8) & 0xfffff)
#define FW_RDEV_WR_ASSOC_FLOWID_GET(x) (((x) >> 0) & 0xfffff)
#define FW_RDEV_WR_RPORT_TYPE_GET(x) (((x) >> 0) & 0x1f)
#define FW_RDEV_WR_NPIV_GET(x) (((x) >> 6) & 0x1)
#define FW_RDEV_WR_CLASS_GET(x) (((x) >> 4) & 0x3)
#define FW_RDEV_WR_TASK_RETRY_ID_GET(x) (((x) >> 5) & 0x1)
#define FW_RDEV_WR_RETRY_GET(x) (((x) >> 4) & 0x1)
#define FW_RDEV_WR_CONF_CMPL_GET(x) (((x) >> 3) & 0x1)
#define FW_RDEV_WR_INI_GET(x) (((x) >> 1) & 0x1)
#define FW_RDEV_WR_TGT_GET(x) (((x) >> 0) & 0x1)
struct fw_fcoe_els_ct_wr {
__be32 op_immdlen;
__be32 flowid_len16;
__be64 cookie;
__be16 iqid;
u8 tmo_val;
u8 els_ct_type;
u8 ctl_pri;
u8 cp_en_class;
__be16 xfer_cnt;
u8 fl_to_sp;
u8 l_id[3];
u8 r5;
u8 r_id[3];
__be64 rsp_dmaaddr;
__be32 rsp_dmalen;
__be32 r6;
};
#define FW_FCOE_ELS_CT_WR_OPCODE(x) ((x) << 24)
#define FW_FCOE_ELS_CT_WR_OPCODE_GET(x) (((x) >> 24) & 0xff)
#define FW_FCOE_ELS_CT_WR_IMMDLEN(x) ((x) << 0)
#define FW_FCOE_ELS_CT_WR_IMMDLEN_GET(x) (((x) >> 0) & 0xff)
#define FW_FCOE_ELS_CT_WR_SP(x) ((x) << 0)
struct fw_scsi_write_wr {
__be32 op_immdlen;
__be32 flowid_len16;
__be64 cookie;
__be16 iqid;
u8 tmo_val;
u8 use_xfer_cnt;
union fw_scsi_write_priv {
struct fcoe_write_priv {
u8 ctl_pri;
u8 cp_en_class;
u8 r3_lo[2];
} fcoe;
struct iscsi_write_priv {
u8 r3[4];
} iscsi;
} u;
__be32 xfer_cnt;
__be32 ini_xfer_cnt;
__be64 rsp_dmaaddr;
__be32 rsp_dmalen;
__be32 r4;
};
#define FW_SCSI_WRITE_WR_IMMDLEN(x) ((x) << 0)
struct fw_scsi_read_wr {
__be32 op_immdlen;
__be32 flowid_len16;
__be64 cookie;
__be16 iqid;
u8 tmo_val;
u8 use_xfer_cnt;
union fw_scsi_read_priv {
struct fcoe_read_priv {
u8 ctl_pri;
u8 cp_en_class;
u8 r3_lo[2];
} fcoe;
struct iscsi_read_priv {
u8 r3[4];
} iscsi;
} u;
__be32 xfer_cnt;
__be32 ini_xfer_cnt;
__be64 rsp_dmaaddr;
__be32 rsp_dmalen;
__be32 r4;
};
#define FW_SCSI_READ_WR_IMMDLEN(x) ((x) << 0)
struct fw_scsi_cmd_wr {
__be32 op_immdlen;
__be32 flowid_len16;
__be64 cookie;
__be16 iqid;
u8 tmo_val;
u8 r3;
union fw_scsi_cmd_priv {
struct fcoe_cmd_priv {
u8 ctl_pri;
u8 cp_en_class;
u8 r4_lo[2];
} fcoe;
struct iscsi_cmd_priv {
u8 r4[4];
} iscsi;
} u;
u8 r5[8];
__be64 rsp_dmaaddr;
__be32 rsp_dmalen;
__be32 r6;
};
#define FW_SCSI_CMD_WR_IMMDLEN(x) ((x) << 0)
#define SCSI_ABORT 0
#define SCSI_CLOSE 1
struct fw_scsi_abrt_cls_wr {
__be32 op_immdlen;
__be32 flowid_len16;
__be64 cookie;
__be16 iqid;
u8 tmo_val;
u8 sub_opcode_to_chk_all_io;
u8 r3[4];
__be64 t_cookie;
};
#define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(x) ((x) << 2)
#define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(x) (((x) >> 2) & 0x3f)
#define FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(x) ((x) << 0)
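/*
 * Composition sketch (hypothetical helper): building the combined
 * sub-opcode/check-all-I/O byte of a fw_scsi_abrt_cls_wr.
 */
static inline u8
csio_abrt_cls_subop(int do_abort, int chk_all_io)
{
	return FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(do_abort ? SCSI_ABORT
						       : SCSI_CLOSE) |
	       FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(chk_all_io ? 1 : 0);
}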
enum fw_cmd_stor_opcodes {
FW_FCOE_RES_INFO_CMD = 0x31,
FW_FCOE_LINK_CMD = 0x32,
FW_FCOE_VNP_CMD = 0x33,
FW_FCOE_SPARAMS_CMD = 0x35,
FW_FCOE_STATS_CMD = 0x37,
FW_FCOE_FCF_CMD = 0x38,
};
struct fw_fcoe_res_info_cmd {
__be32 op_to_read;
__be32 retval_len16;
__be16 e_d_tov;
__be16 r_a_tov_seq;
__be16 r_a_tov_els;
__be16 r_r_tov;
__be32 max_xchgs;
__be32 max_ssns;
__be32 used_xchgs;
__be32 used_ssns;
__be32 max_fcfs;
__be32 max_vnps;
__be32 used_fcfs;
__be32 used_vnps;
};
struct fw_fcoe_link_cmd {
__be32 op_to_portid;
__be32 retval_len16;
__be32 sub_opcode_fcfi;
u8 r3;
u8 lstatus;
__be16 flags;
u8 r4;
u8 set_vlan;
__be16 vlan_id;
__be32 vnpi_pkd;
__be16 r6;
u8 phy_mac[6];
u8 vnport_wwnn[8];
u8 vnport_wwpn[8];
};
#define FW_FCOE_LINK_CMD_PORTID(x) ((x) << 0)
#define FW_FCOE_LINK_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
#define FW_FCOE_LINK_CMD_SUB_OPCODE(x) ((x) << 24U)
#define FW_FCOE_LINK_CMD_FCFI(x) ((x) << 0)
#define FW_FCOE_LINK_CMD_FCFI_GET(x) (((x) >> 0) & 0xffffff)
#define FW_FCOE_LINK_CMD_VNPI_GET(x) (((x) >> 0) & 0xfffff)
struct fw_fcoe_vnp_cmd {
__be32 op_to_fcfi;
__be32 alloc_to_len16;
__be32 gen_wwn_to_vnpi;
__be32 vf_id;
__be16 iqid;
u8 vnport_mac[6];
u8 vnport_wwnn[8];
u8 vnport_wwpn[8];
u8 cmn_srv_parms[16];
u8 clsp_word_0_1[8];
};
#define FW_FCOE_VNP_CMD_FCFI(x) ((x) << 0)
#define FW_FCOE_VNP_CMD_ALLOC (1U << 31)
#define FW_FCOE_VNP_CMD_FREE (1U << 30)
#define FW_FCOE_VNP_CMD_MODIFY (1U << 29)
#define FW_FCOE_VNP_CMD_GEN_WWN (1U << 22)
#define FW_FCOE_VNP_CMD_VFID_EN (1U << 20)
#define FW_FCOE_VNP_CMD_VNPI(x) ((x) << 0)
#define FW_FCOE_VNP_CMD_VNPI_GET(x) (((x) >> 0) & 0xfffff)
struct fw_fcoe_sparams_cmd {
__be32 op_to_portid;
__be32 retval_len16;
u8 r3[7];
u8 cos;
u8 lport_wwnn[8];
u8 lport_wwpn[8];
u8 cmn_srv_parms[16];
u8 cls_srv_parms[16];
};
#define FW_FCOE_SPARAMS_CMD_PORTID(x) ((x) << 0)
struct fw_fcoe_stats_cmd {
__be32 op_to_flowid;
__be32 free_to_len16;
union fw_fcoe_stats {
struct fw_fcoe_stats_ctl {
u8 nstats_port;
u8 port_valid_ix;
__be16 r6;
__be32 r7;
__be64 stat0;
__be64 stat1;
__be64 stat2;
__be64 stat3;
__be64 stat4;
__be64 stat5;
} ctl;
struct fw_fcoe_port_stats {
__be64 tx_bcast_bytes;
__be64 tx_bcast_frames;
__be64 tx_mcast_bytes;
__be64 tx_mcast_frames;
__be64 tx_ucast_bytes;
__be64 tx_ucast_frames;
__be64 tx_drop_frames;
__be64 tx_offload_bytes;
__be64 tx_offload_frames;
__be64 rx_bcast_bytes;
__be64 rx_bcast_frames;
__be64 rx_mcast_bytes;
__be64 rx_mcast_frames;
__be64 rx_ucast_bytes;
__be64 rx_ucast_frames;
__be64 rx_err_frames;
} port_stats;
struct fw_fcoe_fcf_stats {
__be32 fip_tx_bytes;
__be32 fip_tx_fr;
__be64 fcf_ka;
__be64 mcast_adv_rcvd;
__be16 ucast_adv_rcvd;
__be16 sol_sent;
__be16 vlan_req;
__be16 vlan_rpl;
__be16 clr_vlink;
__be16 link_down;
__be16 link_up;
__be16 logo;
__be16 flogi_req;
__be16 flogi_rpl;
__be16 fdisc_req;
__be16 fdisc_rpl;
__be16 fka_prd_chg;
__be16 fc_map_chg;
__be16 vfid_chg;
u8 no_fka_req;
u8 no_vnp;
} fcf_stats;
struct fw_fcoe_pcb_stats {
__be64 tx_bytes;
__be64 tx_frames;
__be64 rx_bytes;
__be64 rx_frames;
__be32 vnp_ka;
__be32 unsol_els_rcvd;
__be64 unsol_cmd_rcvd;
__be16 implicit_logo;
__be16 flogi_inv_sparm;
__be16 fdisc_inv_sparm;
__be16 flogi_rjt;
__be16 fdisc_rjt;
__be16 no_ssn;
__be16 mac_flt_fail;
__be16 inv_fr_rcvd;
} pcb_stats;
struct fw_fcoe_scb_stats {
__be64 tx_bytes;
__be64 tx_frames;
__be64 rx_bytes;
__be64 rx_frames;
__be32 host_abrt_req;
__be32 adap_auto_abrt;
__be32 adap_abrt_rsp;
__be32 host_ios_req;
__be16 ssn_offl_ios;
__be16 ssn_not_rdy_ios;
u8 rx_data_ddp_err;
u8 ddp_flt_set_err;
__be16 rx_data_fr_err;
u8 bad_st_abrt_req;
u8 no_io_abrt_req;
u8 abort_tmo;
u8 abort_tmo_2;
__be32 abort_req;
u8 no_ppod_res_tmo;
u8 bp_tmo;
u8 adap_auto_cls;
u8 no_io_cls_req;
__be32 host_cls_req;
__be64 unsol_cmd_rcvd;
__be32 plogi_req_rcvd;
__be32 prli_req_rcvd;
__be16 logo_req_rcvd;
__be16 prlo_req_rcvd;
__be16 plogi_rjt_rcvd;
__be16 prli_rjt_rcvd;
__be32 adisc_req_rcvd;
__be32 rscn_rcvd;
__be32 rrq_req_rcvd;
__be32 unsol_els_rcvd;
u8 adisc_rjt_rcvd;
u8 scr_rjt;
u8 ct_rjt;
u8 inval_bls_rcvd;
__be32 ba_rjt_rcvd;
} scb_stats;
} u;
};
#define FW_FCOE_STATS_CMD_FLOWID(x) ((x) << 0)
#define FW_FCOE_STATS_CMD_FREE (1U << 30)
#define FW_FCOE_STATS_CMD_NSTATS(x) ((x) << 4)
#define FW_FCOE_STATS_CMD_PORT(x) ((x) << 0)
#define FW_FCOE_STATS_CMD_PORT_VALID (1U << 7)
#define FW_FCOE_STATS_CMD_IX(x) ((x) << 0)
struct fw_fcoe_fcf_cmd {
__be32 op_to_fcfi;
__be32 retval_len16;
__be16 priority_pkd;
u8 mac[6];
u8 name_id[8];
u8 fabric[8];
__be16 vf_id;
__be16 max_fcoe_size;
u8 vlan_id;
u8 fc_map[3];
__be32 fka_adv;
__be32 r6;
u8 r7_hi;
u8 fpma_to_portid;
u8 spma_mac[6];
__be64 r8;
};
#define FW_FCOE_FCF_CMD_FCFI(x) ((x) << 0)
#define FW_FCOE_FCF_CMD_FCFI_GET(x) (((x) >> 0) & 0xfffff)
#define FW_FCOE_FCF_CMD_PRIORITY_GET(x) (((x) >> 0) & 0xff)
#define FW_FCOE_FCF_CMD_FPMA_GET(x) (((x) >> 6) & 0x1)
#define FW_FCOE_FCF_CMD_SPMA_GET(x) (((x) >> 5) & 0x1)
#define FW_FCOE_FCF_CMD_LOGIN_GET(x) (((x) >> 4) & 0x1)
#define FW_FCOE_FCF_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
#endif /* _T4FW_API_STOR_H_ */