lpfc: Add support for using block multi-queue

With blk-mq support in the mid-layer, lpfc can do IO steering based
on the information in the request tag.  This patch allows lpfc to use
blk-mq when it is enabled; when it is not, the driver falls back to
the Emulex-internal affinity mappings.
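
For reference, the steering keys off the tag that blk-mq assigns to each
request. A minimal sketch of that mapping, assuming the 4.x-era block
layer helpers blk_mq_unique_tag() and blk_mq_unique_tag_to_hwq() (the
helper name below is illustrative, not part of this patch):

  #include <linux/blk-mq.h>
  #include <scsi/scsi_cmnd.h>

  /* Illustrative only: recover the hw queue the block layer chose. */
  static u16 example_scmd_to_hwq(struct scsi_cmnd *cmnd)
  {
          u32 tag = blk_mq_unique_tag(cmnd->request);

          /* the unique tag encodes the hw queue index and the per-queue tag */
          return blk_mq_unique_tag_to_hwq(tag);
  }

The driver can then route the command to the FCP work queue matching the
returned hardware queue index, which is what the changes below do.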

This feature can be turned on via CONFIG_SCSI_MQ_DEFAULT or by passing
scsi_mod.use_blk_mq=Y on the kernel command line.
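
At setup time a driver can check which mode the host ended up in via the
scsi-mq era helper shost_use_blk_mq(); a hedged sketch of that check (the
function example_setup_io_queues() and the pr_info() message are
illustrative, not from this patch):

  #include <linux/printk.h>
  #include <scsi/scsi_host.h>

  /* Illustrative only: expose one hw queue per FCP I/O channel and
   * skip the driver-internal affinity setup when blk-mq is in use.
   */
  static void example_setup_io_queues(struct Scsi_Host *shost, int io_channels)
  {
          shost->nr_hw_queues = io_channels;

          if (!shost_use_blk_mq(shost))
                  pr_info("scsi-mq disabled, keeping internal affinity map\n");
  }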

Signed-off-by: Dick Kennedy <dick.kennedy@avagotech.com>
Signed-off-by: James Smart <james.smart@avagotech.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: James Bottomley <JBottomley@Odin.com>
commit 8b0dff1416 (parent 953ceeda97)
Author: James Smart, 2015-05-22 10:42:38 -04:00; committed by James Bottomley
4 changed files with 72 additions and 52 deletions

drivers/scsi/lpfc/lpfc_init.c

@@ -3303,6 +3303,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	shost->max_lun = vport->cfg_max_luns;
 	shost->this_id = -1;
 	shost->max_cmd_len = 16;
+	shost->nr_hw_queues = phba->cfg_fcp_io_channel;
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		shost->dma_boundary =
 			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
@@ -8980,7 +8981,8 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 		phba->cfg_fcp_io_channel = vectors;
 	}
 
-	lpfc_sli4_set_affinity(phba, vectors);
+	if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport)))
+		lpfc_sli4_set_affinity(phba, vectors);
 	return rc;
 
 cfg_fail_out:

drivers/scsi/lpfc/lpfc_scsi.c

@@ -3845,6 +3845,49 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
 }
 
+/**
+ * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
+ * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
+ * held.
+ * If scsi-mq is enabled, get the default block layer mapping of software queues
+ * to hardware queues. This information is saved in request tag.
+ *
+ * Return: index into SLI4 fast-path FCP queue index.
+ **/
+int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
+				  struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+	struct lpfc_vector_map_info *cpup;
+	int chann, cpu;
+	uint32_t tag;
+	uint16_t hwq;
+
+	if (shost_use_blk_mq(cmnd->device->host)) {
+		tag = blk_mq_unique_tag(cmnd->request);
+		hwq = blk_mq_unique_tag_to_hwq(tag);
+
+		return hwq;
+	}
+
+	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
+	    && phba->cfg_fcp_io_channel > 1) {
+		cpu = smp_processor_id();
+		if (cpu < phba->sli4_hba.num_present_cpu) {
+			cpup = phba->sli4_hba.cpu_map;
+			cpup += cpu;
+			return cpup->channel_id;
+		}
+	}
+	chann = atomic_add_return(1, &phba->fcp_qidx);
+	chann = (chann % phba->cfg_fcp_io_channel);
+	return chann;
+}
+
+
 /**
  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
  * @phba: The Hba for which this call is being executed.

drivers/scsi/lpfc/lpfc_scsi.h

@@ -184,3 +184,6 @@ struct lpfc_scsi_buf {
 #define FIND_FIRST_OAS_LUN 0
 #define NO_MORE_OAS_LUN -1
 #define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN
+
+int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
+				  struct lpfc_scsi_buf *lpfc_cmd);

drivers/scsi/lpfc/lpfc_sli.c

@@ -8137,36 +8137,6 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
 	return sglq->sli4_xritag;
 }
 
-/**
- * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
- * @phba: Pointer to HBA context object.
- *
- * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
- * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
- * held.
- *
- * Return: index into SLI4 fast-path FCP queue index.
- **/
-static inline int
-lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
-{
-	struct lpfc_vector_map_info *cpup;
-	int chann, cpu;
-
-	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
-	    && phba->cfg_fcp_io_channel > 1) {
-		cpu = smp_processor_id();
-		if (cpu < phba->sli4_hba.num_present_cpu) {
-			cpup = phba->sli4_hba.cpu_map;
-			cpup += cpu;
-			return cpup->channel_id;
-		}
-	}
-	chann = atomic_add_return(1, &phba->fcp_qidx);
-	chann = (chann % phba->cfg_fcp_io_channel);
-	return chann;
-}
-
 /**
  * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
  * @phba: Pointer to HBA context object.
@@ -8807,27 +8777,29 @@ int
 lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
 		   struct lpfc_iocbq *piocb)
 {
-	if (phba->sli_rev == LPFC_SLI_REV4) {
-		if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
-			if (!(phba->cfg_fof) ||
-			    (!(piocb->iocb_flag & LPFC_IO_FOF))) {
-				if (unlikely(!phba->sli4_hba.fcp_wq))
-					return LPFC_HBA_ERROR;
-				/*
-				 * for abort iocb fcp_wqidx should already
-				 * be setup based on what work queue we used.
-				 */
-				if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
-					piocb->fcp_wqidx =
-						lpfc_sli4_scmd_to_wqidx_distr(phba);
-				ring_number = MAX_SLI3_CONFIGURED_RINGS +
-						piocb->fcp_wqidx;
-			} else {
-				if (unlikely(!phba->sli4_hba.oas_wq))
-					return LPFC_HBA_ERROR;
-				piocb->fcp_wqidx = 0;
-				ring_number = LPFC_FCP_OAS_RING;
-			}
-		}
-	}
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		return ring_number;
+
+	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
+		if (!(phba->cfg_fof) ||
+		    (!(piocb->iocb_flag & LPFC_IO_FOF))) {
+			if (unlikely(!phba->sli4_hba.fcp_wq))
+				return LPFC_HBA_ERROR;
+			/*
+			 * for abort iocb fcp_wqidx should already
+			 * be setup based on what work queue we used.
+			 */
+			if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
+				piocb->fcp_wqidx =
+					lpfc_sli4_scmd_to_wqidx_distr(phba,
+							piocb->context1);
+			ring_number = MAX_SLI3_CONFIGURED_RINGS +
+					piocb->fcp_wqidx;
+		} else {
+			if (unlikely(!phba->sli4_hba.oas_wq))
+				return LPFC_HBA_ERROR;
+			piocb->fcp_wqidx = 0;
+			ring_number = LPFC_FCP_OAS_RING;
+		}
+	}
 	return ring_number;