scsi: use host wide tags by default

This patch changes the !blk-mq path to the same defaults as the blk-mq
I/O path by always enabling block tagging, and always using host wide
tags.  We've had blk-mq available for a few releases so bugs with
this mode should have been ironed out, and this ensures we get better
coverage of our tagging setup over different configs.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: James Bottomley <JBottomley@Odin.com>
This commit is contained in:
Christoph Hellwig 2015-10-08 09:28:04 +01:00 committed by James Bottomley
parent 720ba808e9
commit 64d513ac31
36 changed files with 39 additions and 211 deletions

View File

@ -3689,9 +3689,6 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
*/ */
shost->max_host_blocked = 1; shost->max_host_blocked = 1;
if (scsi_init_shared_tag_map(shost, host->n_tags))
goto err_add;
rc = scsi_add_host_with_dma(ap->scsi_host, rc = scsi_add_host_with_dma(ap->scsi_host,
&ap->tdev, ap->host->dev); &ap->tdev, ap->host->dev);
if (rc) if (rc)

View File

@ -2750,7 +2750,6 @@ static struct scsi_host_template srp_template = {
.cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
.use_clustering = ENABLE_CLUSTERING, .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = srp_host_attrs, .shost_attrs = srp_host_attrs,
.use_blk_tags = 1,
.track_queue_depth = 1, .track_queue_depth = 1,
}; };
@ -3181,10 +3180,6 @@ static ssize_t srp_create_target(struct device *dev,
if (ret) if (ret)
goto out; goto out;
ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
if (ret)
goto out;
target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
if (!srp_conn_unique(target->srp_host, target)) { if (!srp_conn_unique(target->srp_host, target)) {

View File

@ -1994,7 +1994,6 @@ static struct scsi_host_template mptsas_driver_template = {
.cmd_per_lun = 7, .cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING, .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mptscsih_host_attrs, .shost_attrs = mptscsih_host_attrs,
.use_blk_tags = 1,
}; };
static int mptsas_get_linkerrors(struct sas_phy *phy) static int mptsas_get_linkerrors(struct sas_phy *phy)

View File

@ -325,7 +325,6 @@ NCR_700_detect(struct scsi_host_template *tpnt,
tpnt->slave_destroy = NCR_700_slave_destroy; tpnt->slave_destroy = NCR_700_slave_destroy;
tpnt->slave_alloc = NCR_700_slave_alloc; tpnt->slave_alloc = NCR_700_slave_alloc;
tpnt->change_queue_depth = NCR_700_change_queue_depth; tpnt->change_queue_depth = NCR_700_change_queue_depth;
tpnt->use_blk_tags = 1;
if(tpnt->name == NULL) if(tpnt->name == NULL)
tpnt->name = "53c700"; tpnt->name = "53c700";
@ -1107,7 +1106,9 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
BUG(); BUG();
} }
if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) { if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]); struct scsi_cmnd *SCp;
SCp = scsi_host_find_tag(SDp->host, hostdata->msgin[2]);
if(unlikely(SCp == NULL)) { if(unlikely(SCp == NULL)) {
printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n", printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
host->host_no, reselection_id, lun, hostdata->msgin[2]); host->host_no, reselection_id, lun, hostdata->msgin[2]);
@ -1119,7 +1120,9 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
"reselection is tag %d, slot %p(%d)\n", "reselection is tag %d, slot %p(%d)\n",
hostdata->msgin[2], slot, slot->tag); hostdata->msgin[2], slot, slot->tag);
} else { } else {
struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG); struct scsi_cmnd *SCp;
SCp = scsi_host_find_tag(SDp->host, SCSI_NO_TAG);
if(unlikely(SCp == NULL)) { if(unlikely(SCp == NULL)) {
sdev_printk(KERN_ERR, SDp, sdev_printk(KERN_ERR, SDp,
"no saved request for untagged cmd\n"); "no saved request for untagged cmd\n");
@ -1823,7 +1826,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
slot->tag, slot); slot->tag, slot);
} else { } else {
slot->tag = SCSI_NO_TAG; slot->tag = SCSI_NO_TAG;
/* must populate current_cmnd for scsi_find_tag to work */ /* must populate current_cmnd for scsi_host_find_tag to work */
SCp->device->current_cmnd = SCp; SCp->device->current_cmnd = SCp;
} }
/* sanity check: some of the commands generated by the mid-layer /* sanity check: some of the commands generated by the mid-layer

View File

@ -10819,7 +10819,6 @@ static struct scsi_host_template advansys_template = {
* by enabling clustering, I/O throughput increases as well. * by enabling clustering, I/O throughput increases as well.
*/ */
.use_clustering = ENABLE_CLUSTERING, .use_clustering = ENABLE_CLUSTERING,
.use_blk_tags = 1,
}; };
static int advansys_wide_init_chip(struct Scsi_Host *shost) static int advansys_wide_init_chip(struct Scsi_Host *shost)
@ -11211,11 +11210,6 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
/* Set maximum number of queues the adapter can handle. */ /* Set maximum number of queues the adapter can handle. */
shost->can_queue = adv_dvc_varp->max_host_qng; shost->can_queue = adv_dvc_varp->max_host_qng;
} }
ret = scsi_init_shared_tag_map(shost, shost->can_queue);
if (ret) {
shost_printk(KERN_ERR, shost, "init tag map failed\n");
goto err_free_dma;
}
/* /*
* Set the maximum number of scatter-gather elements the * Set the maximum number of scatter-gather elements the

View File

@ -925,7 +925,6 @@ struct scsi_host_template aic79xx_driver_template = {
.slave_configure = ahd_linux_slave_configure, .slave_configure = ahd_linux_slave_configure,
.target_alloc = ahd_linux_target_alloc, .target_alloc = ahd_linux_target_alloc,
.target_destroy = ahd_linux_target_destroy, .target_destroy = ahd_linux_target_destroy,
.use_blk_tags = 1,
}; };
/******************************** Bus DMA *************************************/ /******************************** Bus DMA *************************************/

View File

@ -812,7 +812,6 @@ struct scsi_host_template aic7xxx_driver_template = {
.slave_configure = ahc_linux_slave_configure, .slave_configure = ahc_linux_slave_configure,
.target_alloc = ahc_linux_target_alloc, .target_alloc = ahc_linux_target_alloc,
.target_destroy = ahc_linux_target_destroy, .target_destroy = ahc_linux_target_destroy,
.use_blk_tags = 1,
}; };
/**************************** Tasklet Handler *********************************/ /**************************** Tasklet Handler *********************************/

View File

@ -73,7 +73,6 @@ static struct scsi_host_template aic94xx_sht = {
.eh_bus_reset_handler = sas_eh_bus_reset_handler, .eh_bus_reset_handler = sas_eh_bus_reset_handler,
.target_destroy = sas_target_destroy, .target_destroy = sas_target_destroy,
.ioctl = sas_ioctl, .ioctl = sas_ioctl,
.use_blk_tags = 1,
.track_queue_depth = 1, .track_queue_depth = 1,
}; };

View File

@ -800,7 +800,6 @@ struct scsi_host_template bfad_im_scsi_host_template = {
.shost_attrs = bfad_im_host_attrs, .shost_attrs = bfad_im_host_attrs,
.max_sectors = BFAD_MAX_SECTORS, .max_sectors = BFAD_MAX_SECTORS,
.vendor_id = BFA_PCI_VENDOR_ID_BROCADE, .vendor_id = BFA_PCI_VENDOR_ID_BROCADE,
.use_blk_tags = 1,
}; };
struct scsi_host_template bfad_im_vport_template = { struct scsi_host_template bfad_im_vport_template = {
@ -822,7 +821,6 @@ struct scsi_host_template bfad_im_vport_template = {
.use_clustering = ENABLE_CLUSTERING, .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = bfad_im_vport_attrs, .shost_attrs = bfad_im_vport_attrs,
.max_sectors = BFAD_MAX_SECTORS, .max_sectors = BFAD_MAX_SECTORS,
.use_blk_tags = 1,
}; };
bfa_status_t bfa_status_t

View File

@ -2867,7 +2867,6 @@ static struct scsi_host_template bnx2fc_shost_template = {
.use_clustering = ENABLE_CLUSTERING, .use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
.max_sectors = 1024, .max_sectors = 1024,
.use_blk_tags = 1,
.track_queue_depth = 1, .track_queue_depth = 1,
}; };

View File

@ -2283,7 +2283,6 @@ struct scsi_host_template csio_fcoe_shost_template = {
.use_clustering = ENABLE_CLUSTERING, .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = csio_fcoe_lport_attrs, .shost_attrs = csio_fcoe_lport_attrs,
.max_sectors = CSIO_MAX_SECTOR_SIZE, .max_sectors = CSIO_MAX_SECTOR_SIZE,
.use_blk_tags = 1,
}; };
struct scsi_host_template csio_fcoe_shost_vport_template = { struct scsi_host_template csio_fcoe_shost_vport_template = {
@ -2303,7 +2302,6 @@ struct scsi_host_template csio_fcoe_shost_vport_template = {
.use_clustering = ENABLE_CLUSTERING, .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = csio_fcoe_vport_attrs, .shost_attrs = csio_fcoe_vport_attrs,
.max_sectors = CSIO_MAX_SECTOR_SIZE, .max_sectors = CSIO_MAX_SECTOR_SIZE,
.use_blk_tags = 1,
}; };
/* /*

View File

@ -256,7 +256,6 @@ static struct scsi_host_template driver_template = {
.proc_name = ESAS2R_DRVR_NAME, .proc_name = ESAS2R_DRVR_NAME,
.change_queue_depth = scsi_change_queue_depth, .change_queue_depth = scsi_change_queue_depth,
.max_sectors = 0xFFFF, .max_sectors = 0xFFFF,
.use_blk_tags = 1,
}; };
int sgl_page_size = 512; int sgl_page_size = 512;

View File

@ -2694,7 +2694,6 @@ struct scsi_host_template scsi_esp_template = {
.use_clustering = ENABLE_CLUSTERING, .use_clustering = ENABLE_CLUSTERING,
.max_sectors = 0xffff, .max_sectors = 0xffff,
.skip_settle_delay = 1, .skip_settle_delay = 1,
.use_blk_tags = 1,
}; };
EXPORT_SYMBOL(scsi_esp_template); EXPORT_SYMBOL(scsi_esp_template);

View File

@ -287,7 +287,6 @@ static struct scsi_host_template fcoe_shost_template = {
.use_clustering = ENABLE_CLUSTERING, .use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = SG_ALL, .sg_tablesize = SG_ALL,
.max_sectors = 0xffff, .max_sectors = 0xffff,
.use_blk_tags = 1,
.track_queue_depth = 1, .track_queue_depth = 1,
}; };

View File

@ -118,7 +118,6 @@ static struct scsi_host_template fnic_host_template = {
.sg_tablesize = FNIC_MAX_SG_DESC_CNT, .sg_tablesize = FNIC_MAX_SG_DESC_CNT,
.max_sectors = 0xffff, .max_sectors = 0xffff,
.shost_attrs = fnic_attrs, .shost_attrs = fnic_attrs,
.use_blk_tags = 1,
.track_queue_depth = 1, .track_queue_depth = 1,
}; };
@ -697,13 +696,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} }
fnic->fnic_max_tag_id = host->can_queue; fnic->fnic_max_tag_id = host->can_queue;
err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Unable to alloc shared tag map\n");
goto err_out_dev_close;
}
host->max_lun = fnic->config.luns_per_tgt; host->max_lun = fnic->config.luns_per_tgt;
host->max_id = FNIC_MAX_FCP_TARGET; host->max_id = FNIC_MAX_FCP_TARGET;
host->max_cmd_len = FCOE_MAX_CMD_LEN; host->max_cmd_len = FCOE_MAX_CMD_LEN;

View File

@ -217,6 +217,13 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
error = scsi_mq_setup_tags(shost); error = scsi_mq_setup_tags(shost);
if (error) if (error)
goto fail; goto fail;
} else {
shost->bqt = blk_init_tags(shost->can_queue,
shost->hostt->tag_alloc_policy);
if (!shost->bqt) {
error = -ENOMEM;
goto fail;
}
} }
/* /*

View File

@ -4983,7 +4983,6 @@ static int hpsa_scan_finished(struct Scsi_Host *sh,
static int hpsa_scsi_host_alloc(struct ctlr_info *h) static int hpsa_scsi_host_alloc(struct ctlr_info *h)
{ {
struct Scsi_Host *sh; struct Scsi_Host *sh;
int error;
sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
if (sh == NULL) { if (sh == NULL) {
@ -5004,14 +5003,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
sh->hostdata[0] = (unsigned long) h; sh->hostdata[0] = (unsigned long) h;
sh->irq = h->intr[h->intr_mode]; sh->irq = h->intr[h->intr_mode];
sh->unique_id = sh->irq; sh->unique_id = sh->irq;
error = scsi_init_shared_tag_map(sh, sh->can_queue);
if (error) {
dev_err(&h->pdev->dev,
"%s: scsi_init_shared_tag_map failed for controller %d\n",
__func__, h->ctlr);
scsi_host_put(sh);
return error;
}
h->scsi_host = sh; h->scsi_host = sh;
return 0; return 0;
} }

View File

@ -3095,7 +3095,6 @@ static struct scsi_host_template driver_template = {
.max_sectors = IBMVFC_MAX_SECTORS, .max_sectors = IBMVFC_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING, .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = ibmvfc_attrs, .shost_attrs = ibmvfc_attrs,
.use_blk_tags = 1,
.track_queue_depth = 1, .track_queue_depth = 1,
}; };

View File

@ -6502,7 +6502,6 @@ static struct scsi_host_template driver_template = {
.shost_attrs = ipr_ioa_attrs, .shost_attrs = ipr_ioa_attrs,
.sdev_attrs = ipr_dev_attrs, .sdev_attrs = ipr_dev_attrs,
.proc_name = IPR_NAME, .proc_name = IPR_NAME,
.use_blk_tags = 1,
}; };
/** /**

View File

@ -170,7 +170,6 @@ static struct scsi_host_template isci_sht = {
.target_destroy = sas_target_destroy, .target_destroy = sas_target_destroy,
.ioctl = sas_ioctl, .ioctl = sas_ioctl,
.shost_attrs = isci_host_attrs, .shost_attrs = isci_host_attrs,
.use_blk_tags = 1,
.track_queue_depth = 1, .track_queue_depth = 1,
}; };

View File

@ -5914,7 +5914,6 @@ struct scsi_host_template lpfc_template_s3 = {
.max_sectors = 0xFFFF, .max_sectors = 0xFFFF,
.vendor_id = LPFC_NL_VENDOR_ID, .vendor_id = LPFC_NL_VENDOR_ID,
.change_queue_depth = scsi_change_queue_depth, .change_queue_depth = scsi_change_queue_depth,
.use_blk_tags = 1,
.track_queue_depth = 1, .track_queue_depth = 1,
}; };
@ -5940,7 +5939,6 @@ struct scsi_host_template lpfc_template = {
.max_sectors = 0xFFFF, .max_sectors = 0xFFFF,
.vendor_id = LPFC_NL_VENDOR_ID, .vendor_id = LPFC_NL_VENDOR_ID,
.change_queue_depth = scsi_change_queue_depth, .change_queue_depth = scsi_change_queue_depth,
.use_blk_tags = 1,
.track_queue_depth = 1, .track_queue_depth = 1,
}; };
@ -5964,6 +5962,5 @@ struct scsi_host_template lpfc_vport_template = {
.shost_attrs = lpfc_vport_attrs, .shost_attrs = lpfc_vport_attrs,
.max_sectors = 0xFFFF, .max_sectors = 0xFFFF,
.change_queue_depth = scsi_change_queue_depth, .change_queue_depth = scsi_change_queue_depth,
.use_blk_tags = 1,
.track_queue_depth = 1, .track_queue_depth = 1,
}; };

View File

@ -5049,7 +5049,6 @@ static int megasas_start_aen(struct megasas_instance *instance)
static int megasas_io_attach(struct megasas_instance *instance) static int megasas_io_attach(struct megasas_instance *instance)
{ {
struct Scsi_Host *host = instance->host; struct Scsi_Host *host = instance->host;
u32 error;
/* /*
* Export parameters required by SCSI mid-layer * Export parameters required by SCSI mid-layer
@ -5099,13 +5098,6 @@ static int megasas_io_attach(struct megasas_instance *instance)
host->hostt->eh_device_reset_handler = NULL; host->hostt->eh_device_reset_handler = NULL;
host->hostt->eh_bus_reset_handler = NULL; host->hostt->eh_bus_reset_handler = NULL;
} }
error = scsi_init_shared_tag_map(host, host->can_queue);
if (error) {
dev_err(&instance->pdev->dev,
"Failed to shared tag from %s %d\n",
__func__, __LINE__);
return -ENODEV;
}
/* /*
* Notify the mid-layer about the new controller * Notify the mid-layer about the new controller

View File

@ -65,7 +65,6 @@ static struct scsi_host_template mvs_sht = {
.target_destroy = sas_target_destroy, .target_destroy = sas_target_destroy,
.ioctl = sas_ioctl, .ioctl = sas_ioctl,
.shost_attrs = mvst_host_attrs, .shost_attrs = mvst_host_attrs,
.use_blk_tags = 1,
.track_queue_depth = 1, .track_queue_depth = 1,
}; };

View File

@ -88,7 +88,6 @@ static struct scsi_host_template pm8001_sht = {
.target_destroy = sas_target_destroy, .target_destroy = sas_target_destroy,
.ioctl = sas_ioctl, .ioctl = sas_ioctl,
.shost_attrs = pm8001_host_attrs, .shost_attrs = pm8001_host_attrs,
.use_blk_tags = 1,
.track_queue_depth = 1, .track_queue_depth = 1,
}; };

View File

@ -4254,7 +4254,6 @@ static struct scsi_host_template pmcraid_host_template = {
.use_clustering = ENABLE_CLUSTERING, .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = pmcraid_host_attrs, .shost_attrs = pmcraid_host_attrs,
.proc_name = PMCRAID_DRIVER_NAME, .proc_name = PMCRAID_DRIVER_NAME,
.use_blk_tags = 1,
}; };
/* /*

View File

@ -267,7 +267,6 @@ struct scsi_host_template qla2xxx_driver_template = {
.shost_attrs = qla2x00_host_attrs, .shost_attrs = qla2x00_host_attrs,
.supported_mode = MODE_INITIATOR, .supported_mode = MODE_INITIATOR,
.use_blk_tags = 1,
.track_queue_depth = 1, .track_queue_depth = 1,
}; };

View File

@ -212,7 +212,6 @@ static struct scsi_host_template qla4xxx_driver_template = {
.shost_attrs = qla4xxx_host_attrs, .shost_attrs = qla4xxx_host_attrs,
.host_reset = qla4xxx_host_reset, .host_reset = qla4xxx_host_reset,
.vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC, .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
.use_blk_tags = 1,
}; };
static struct iscsi_transport qla4xxx_iscsi_transport = { static struct iscsi_transport qla4xxx_iscsi_transport = {
@ -8697,13 +8696,6 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
host->can_queue = MAX_SRBS ; host->can_queue = MAX_SRBS ;
host->transportt = qla4xxx_scsi_transport; host->transportt = qla4xxx_scsi_transport;
ret = scsi_init_shared_tag_map(host, MAX_SRBS);
if (ret) {
ql4_printk(KERN_WARNING, ha,
"%s: scsi_init_shared_tag_map failed\n", __func__);
goto probe_failed;
}
pci_set_drvdata(pdev, ha); pci_set_drvdata(pdev, ha);
ret = scsi_add_host(host, &pdev->dev); ret = scsi_add_host(host, &pdev->dev);

View File

@ -616,32 +616,11 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
*/ */
int scsi_change_queue_depth(struct scsi_device *sdev, int depth) int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
{ {
unsigned long flags; if (depth > 0) {
sdev->queue_depth = depth;
if (depth <= 0) wmb();
goto out;
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
/*
* Check to see if the queue is managed by the block layer.
* If it is, and we fail to adjust the depth, exit.
*
* Do not resize the tag map if it is a host wide share bqt,
* because the size should be the hosts's can_queue. If there
* is more IO than the LLD's can_queue (so there are not enuogh
* tags) request_fn's host queue ready check will handle it.
*/
if (!shost_use_blk_mq(sdev->host) && !sdev->host->bqt) {
if (blk_queue_tagged(sdev->request_queue) &&
blk_queue_resize_tags(sdev->request_queue, depth) != 0)
goto out_unlock;
} }
sdev->queue_depth = depth;
out_unlock:
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
out:
return sdev->queue_depth; return sdev->queue_depth;
} }
EXPORT_SYMBOL(scsi_change_queue_depth); EXPORT_SYMBOL(scsi_change_queue_depth);

View File

@ -274,8 +274,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
WARN_ON_ONCE(!blk_get_queue(sdev->request_queue)); WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
sdev->request_queue->queuedata = sdev; sdev->request_queue->queuedata = sdev;
if (!shost_use_blk_mq(sdev->host) && if (!shost_use_blk_mq(sdev->host)) {
(shost->bqt || shost->hostt->use_blk_tags)) {
blk_queue_init_tags(sdev->request_queue, blk_queue_init_tags(sdev->request_queue,
sdev->host->cmd_per_lun, shost->bqt, sdev->host->cmd_per_lun, shost->bqt,
shost->hostt->tag_alloc_policy); shost->hostt->tag_alloc_policy);

View File

@ -124,7 +124,6 @@ static struct scsi_host_template snic_host_template = {
.sg_tablesize = SNIC_MAX_SG_DESC_CNT, .sg_tablesize = SNIC_MAX_SG_DESC_CNT,
.max_sectors = 0x800, .max_sectors = 0x800,
.shost_attrs = snic_attrs, .shost_attrs = snic_attrs,
.use_blk_tags = 1,
.track_queue_depth = 1, .track_queue_depth = 1,
.cmd_size = sizeof(struct snic_internal_io_state), .cmd_size = sizeof(struct snic_internal_io_state),
.proc_name = "snic_scsi", .proc_name = "snic_scsi",
@ -533,15 +532,6 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
snic->max_tag_id = shost->can_queue; snic->max_tag_id = shost->can_queue;
ret = scsi_init_shared_tag_map(shost, snic->max_tag_id);
if (ret) {
SNIC_HOST_ERR(shost,
"Unable to alloc shared tag map. %d\n",
ret);
goto err_dev_close;
}
shost->max_lun = snic->config.luns_per_tgt; shost->max_lun = snic->config.luns_per_tgt;
shost->max_id = SNIC_MAX_TARGET; shost->max_id = SNIC_MAX_TARGET;

View File

@ -1374,7 +1374,6 @@ static struct scsi_host_template driver_template = {
.eh_abort_handler = stex_abort, .eh_abort_handler = stex_abort,
.eh_host_reset_handler = stex_reset, .eh_host_reset_handler = stex_reset,
.this_id = -1, .this_id = -1,
.use_blk_tags = 1,
}; };
static struct pci_device_id stex_pci_tbl[] = { static struct pci_device_id stex_pci_tbl[] = {
@ -1659,13 +1658,6 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err) if (err)
goto out_free_irq; goto out_free_irq;
err = scsi_init_shared_tag_map(host, host->can_queue);
if (err) {
printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n",
pci_name(pdev));
goto out_free_irq;
}
pci_set_drvdata(pdev, hba); pci_set_drvdata(pdev, hba);
err = scsi_add_host(host, &pdev->dev); err = scsi_add_host(host, &pdev->dev);

View File

@ -4355,7 +4355,6 @@ static struct scsi_host_template ufshcd_driver_template = {
.cmd_per_lun = UFSHCD_CMD_PER_LUN, .cmd_per_lun = UFSHCD_CMD_PER_LUN,
.can_queue = UFSHCD_CAN_QUEUE, .can_queue = UFSHCD_CAN_QUEUE,
.max_host_blocked = 1, .max_host_blocked = 1,
.use_blk_tags = 1,
.track_queue_depth = 1, .track_queue_depth = 1,
}; };
@ -5619,13 +5618,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->is_irq_enabled = true; hba->is_irq_enabled = true;
} }
/* Enable SCSI tag mapping */
err = scsi_init_shared_tag_map(host, host->can_queue);
if (err) {
dev_err(hba->dev, "init shared queue failed\n");
goto exit_gating;
}
err = scsi_add_host(host, hba->dev); err = scsi_add_host(host, hba->dev);
if (err) { if (err) {
dev_err(hba->dev, "scsi_add_host failed\n"); dev_err(hba->dev, "scsi_add_host failed\n");

View File

@ -377,7 +377,6 @@ static struct scsi_host_template tcm_loop_driver_template = {
.use_clustering = DISABLE_CLUSTERING, .use_clustering = DISABLE_CLUSTERING,
.slave_alloc = tcm_loop_slave_alloc, .slave_alloc = tcm_loop_slave_alloc,
.module = THIS_MODULE, .module = THIS_MODULE,
.use_blk_tags = 1,
.track_queue_depth = 1, .track_queue_depth = 1,
}; };

View File

@ -812,7 +812,6 @@ static struct scsi_host_template uas_host_template = {
.this_id = -1, .this_id = -1,
.sg_tablesize = SG_NONE, .sg_tablesize = SG_NONE,
.skip_settle_delay = 1, .skip_settle_delay = 1,
.use_blk_tags = 1,
}; };
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
@ -929,10 +928,6 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (result) if (result)
goto set_alt0; goto set_alt0;
result = scsi_init_shared_tag_map(shost, devinfo->qdepth - 2);
if (result)
goto free_streams;
usb_set_intfdata(intf, shost); usb_set_intfdata(intf, shost);
result = scsi_add_host(shost, &intf->dev); result = scsi_add_host(shost, &intf->dev);
if (result) if (result)

View File

@ -405,11 +405,6 @@ struct scsi_host_template {
/* If use block layer to manage tags, this is tag allocation policy */ /* If use block layer to manage tags, this is tag allocation policy */
int tag_alloc_policy; int tag_alloc_policy;
/*
* Let the block layer assigns tags to all commands.
*/
unsigned use_blk_tags:1;
/* /*
* Track QUEUE_FULL events and reduce queue depth on demand. * Track QUEUE_FULL events and reduce queue depth on demand.
*/ */

View File

@ -10,91 +10,36 @@
#ifdef CONFIG_BLOCK #ifdef CONFIG_BLOCK
static inline struct scsi_cmnd *scsi_mq_find_tag(struct Scsi_Host *shost,
int unique_tag)
{
u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
struct request *req = NULL;
if (hwq < shost->tag_set.nr_hw_queues)
req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],
blk_mq_unique_tag_to_tag(unique_tag));
return req ? (struct scsi_cmnd *)req->special : NULL;
}
/**
* scsi_find_tag - find a tagged command by device
* @SDpnt: pointer to the ScSI device
* @tag: tag generated by blk_mq_unique_tag()
*
* Notes:
* Only works with tags allocated by the generic blk layer.
**/
static inline struct scsi_cmnd *scsi_find_tag(struct scsi_device *sdev, int tag)
{
struct request *req;
if (tag != SCSI_NO_TAG) {
if (shost_use_blk_mq(sdev->host))
return scsi_mq_find_tag(sdev->host, tag);
req = blk_queue_find_tag(sdev->request_queue, tag);
return req ? (struct scsi_cmnd *)req->special : NULL;
}
/* single command, look in space */
return sdev->current_cmnd;
}
/**
* scsi_init_shared_tag_map - create a shared tag map
* @shost: the host to share the tag map among all devices
* @depth: the total depth of the map
*/
static inline int scsi_init_shared_tag_map(struct Scsi_Host *shost, int depth)
{
/*
* We always have a shared tag map around when using blk-mq.
*/
if (shost_use_blk_mq(shost))
return 0;
/*
* If the shared tag map isn't already initialized, do it now.
* This saves callers from having to check ->bqt when setting up
* devices on the shared host (for libata)
*/
if (!shost->bqt) {
shost->bqt = blk_init_tags(depth,
shost->hostt->tag_alloc_policy);
if (!shost->bqt)
return -ENOMEM;
}
return 0;
}
/** /**
* scsi_host_find_tag - find the tagged command by host * scsi_host_find_tag - find the tagged command by host
* @shost: pointer to scsi_host * @shost: pointer to scsi_host
* @tag: tag generated by blk_mq_unique_tag() * @tag: tag
* *
* Notes: * Note: for devices using multiple hardware queues tag must have been
* Only works with tags allocated by the generic blk layer. * generated by blk_mq_unique_tag().
**/ **/
static inline struct scsi_cmnd *scsi_host_find_tag(struct Scsi_Host *shost, static inline struct scsi_cmnd *scsi_host_find_tag(struct Scsi_Host *shost,
int tag) int tag)
{ {
struct request *req; struct request *req = NULL;
if (tag != SCSI_NO_TAG) { if (tag == SCSI_NO_TAG)
if (shost_use_blk_mq(shost)) return NULL;
return scsi_mq_find_tag(shost, tag);
if (shost_use_blk_mq(shost)) {
u16 hwq = blk_mq_unique_tag_to_hwq(tag);
if (hwq < shost->tag_set.nr_hw_queues) {
req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],
blk_mq_unique_tag_to_tag(tag));
}
} else {
req = blk_map_queue_find_tag(shost->bqt, tag); req = blk_map_queue_find_tag(shost->bqt, tag);
return req ? (struct scsi_cmnd *)req->special : NULL;
} }
return NULL;
if (!req)
return NULL;
return req->special;
} }
#endif /* CONFIG_BLOCK */ #endif /* CONFIG_BLOCK */