Merge branch 'nvme-4.16-rc5' of git://git.infradead.org/nvme into for-linus

Pull NVMe fixes for this series from Keith:

"A few late fixes for 4.16:

 * Reverting sysfs slave device links for native nvme multipathing.
   The hidden disk attributes broke common user tools.

 * A fix for a PPC PCI error handling regression.

 * Update the PCI interrupt count to consider the actual IRQ spread, fixing
   potentially poor initial queue affinity.

 * Off-by-one errors in nvme-fc queue sizes.

 * A fabrics discovery fix to be more tolerant with user tools."

* 'nvme-4.16-rc5' of git://git.infradead.org/nvme:
  nvme_fc: rework sqsize handling
  nvme-fabrics: Ignore nr_io_queues option for discovery controllers
  Revert "nvme: create 'slaves' and 'holders' entries for hidden controllers"
  nvme: pci: pass max vectors as num_possible_cpus() to pci_alloc_irq_vectors
  nvme-pci: Fix EEH failure on ppc
Commit e576b7b528 by Jens Axboe, 2018-03-08 19:12:59 -07:00
6 changed files with 30 additions and 57 deletions

drivers/nvme/host/core.c

@@ -3033,7 +3033,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 			ns->disk->disk_name);
 
 	nvme_mpath_add_disk(ns->head);
-	nvme_mpath_add_disk_links(ns);
 	return;
  out_unlink_ns:
 	mutex_lock(&ctrl->subsys->lock);
@@ -3053,7 +3052,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 		return;
 
 	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
-		nvme_mpath_remove_disk_links(ns);
 		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
 					&nvme_ns_id_attr_group);
 		if (ns->ndev)

drivers/nvme/host/fabrics.c

@@ -650,6 +650,11 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -EINVAL;
 				goto out;
 			}
+			if (opts->discovery_nqn) {
+				pr_debug("Ignoring nr_io_queues value for discovery controller\n");
+				break;
+			}
+
 			opts->nr_io_queues = min_t(unsigned int,
 					num_online_cpus(), token);
 			break;
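
The hunk above makes the discovery case explicit. As a stand-alone illustration of the resulting behavior, the sketch below models the parsing branch; parse_nr_io_queues and ONLINE_CPUS are hypothetical stand-ins for the kernel's NVMF_OPT_NR_IO_QUEUES handler and num_online_cpus(), and the sample values are made up:

#include <stdio.h>

#define ONLINE_CPUS 8	/* stand-in for num_online_cpus() */

/* Hypothetical model of the NVMF_OPT_NR_IO_QUEUES branch above. */
static unsigned int parse_nr_io_queues(int discovery_nqn, int token,
				       unsigned int current_val)
{
	if (discovery_nqn) {
		/* Discovery controllers have no I/O queues; keep the default. */
		printf("Ignoring nr_io_queues value for discovery controller\n");
		return current_val;
	}
	/* Regular controllers clamp the request to the online CPU count. */
	return token < ONLINE_CPUS ? (unsigned int)token : ONLINE_CPUS;
}

int main(void)
{
	printf("regular controller:   nr_io_queues = %u\n",
	       parse_nr_io_queues(0, 32, 1));
	printf("discovery controller: nr_io_queues = %u\n",
	       parse_nr_io_queues(1, 32, 1));
	return 0;
}

The point of the fix is the first branch: user tools that pass nr_io_queues on every connect no longer influence discovery connects.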

drivers/nvme/host/fc.c

@@ -1206,7 +1206,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
 			sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
 
 	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
-	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
+	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
 	/* Linux supports only Dynamic controllers */
 	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
 	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
@@ -1321,7 +1321,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 			sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
 	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
 	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
-	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
+	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
 
 	lsop->queue = queue;
 	lsreq->rqstaddr = conn_rqst;
@@ -2481,11 +2481,11 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 		goto out_free_tag_set;
 	}
 
-	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_cleanup_blk_queue;
 
-	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_delete_hw_queues;
@@ -2532,11 +2532,11 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
 	if (ret)
 		goto out_free_io_queues;
 
-	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_free_io_queues;
 
-	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_delete_hw_queues;
@@ -2632,13 +2632,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	nvme_fc_init_queue(ctrl, 0);
 
 	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
-				NVME_AQ_BLK_MQ_DEPTH);
+				NVME_AQ_DEPTH);
 	if (ret)
 		goto out_free_queue;
 
 	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
-				NVME_AQ_BLK_MQ_DEPTH,
-				(NVME_AQ_BLK_MQ_DEPTH / 4));
+				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
 	if (ret)
 		goto out_delete_hw_queue;
@@ -2666,7 +2665,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	}
 
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
 
 	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
 	if (ret)
@@ -2699,6 +2698,14 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 		opts->queue_size = ctrl->ctrl.maxcmd;
 	}
 
+	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
+		/* warn if sqsize is lower than queue_size */
+		dev_warn(ctrl->ctrl.device,
+			"queue_size %zu > ctrl sqsize %u, clamping down\n",
+			opts->queue_size, ctrl->ctrl.sqsize + 1);
+		opts->queue_size = ctrl->ctrl.sqsize + 1;
+	}
+
 	ret = nvme_fc_init_aen_ops(ctrl);
 	if (ret)
 		goto out_term_aen_ops;
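
The sqsize rework turns on NVMe's zero-based queue-size convention: a queue holding N entries goes on the wire as N - 1, and CAP.MQES is zero-based as well. Below is a minimal sketch of that arithmetic; the helper names (to_wire_sqsize, clamp_sqsize) and sample values are hypothetical, chosen purely for illustration:

#include <stdio.h>

/* One's-based queue depth in, zero-based wire value out (qsize - 1). */
static unsigned int to_wire_sqsize(unsigned int qsize)
{
	return qsize - 1;
}

/* Both arguments zero-based, mirroring min_t(int, MQES, sqsize) above. */
static unsigned int clamp_sqsize(unsigned int mqes, unsigned int sqsize)
{
	return mqes < sqsize ? mqes : sqsize;
}

int main(void)
{
	unsigned int queue_size = 128;			/* opts->queue_size */
	unsigned int sqsize = to_wire_sqsize(queue_size);	/* 127 */
	unsigned int mqes = 63;				/* CAP.MQES: 64 entries */

	sqsize = clamp_sqsize(mqes, sqsize);		/* clamped to 63 */
	/* Driver-side depths go back to one's-based: sqsize + 1. */
	printf("wire sqsize %u -> usable queue depth %u\n",
	       sqsize, sqsize + 1);
	return 0;
}

That sqsize + 1 round trip is what the create/connect calls above switch to, and the final hunk clamps opts->queue_size down to it so the host never queues more commands than the controller advertised.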

drivers/nvme/host/multipath.c

@@ -210,25 +210,6 @@ void nvme_mpath_add_disk(struct nvme_ns_head *head)
 	mutex_unlock(&head->subsys->lock);
 }
 
-void nvme_mpath_add_disk_links(struct nvme_ns *ns)
-{
-	struct kobject *slave_disk_kobj, *holder_disk_kobj;
-
-	if (!ns->head->disk)
-		return;
-
-	slave_disk_kobj = &disk_to_dev(ns->disk)->kobj;
-	if (sysfs_create_link(ns->head->disk->slave_dir, slave_disk_kobj,
-			kobject_name(slave_disk_kobj)))
-		return;
-
-	holder_disk_kobj = &disk_to_dev(ns->head->disk)->kobj;
-	if (sysfs_create_link(ns->disk->part0.holder_dir, holder_disk_kobj,
-			kobject_name(holder_disk_kobj)))
-		sysfs_remove_link(ns->head->disk->slave_dir,
-			kobject_name(slave_disk_kobj));
-}
-
 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
 	if (!head->disk)
@@ -243,14 +224,3 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 	blk_cleanup_queue(head->disk->queue);
 	put_disk(head->disk);
 }
-
-void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
-{
-	if (!ns->head->disk)
-		return;
-
-	sysfs_remove_link(ns->disk->part0.holder_dir,
-			kobject_name(&disk_to_dev(ns->head->disk)->kobj));
-	sysfs_remove_link(ns->head->disk->slave_dir,
-			kobject_name(&disk_to_dev(ns->disk)->kobj));
-}

drivers/nvme/host/nvme.h

@@ -410,9 +410,7 @@ bool nvme_req_needs_failover(struct request *req, blk_status_t error);
 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
 void nvme_mpath_add_disk(struct nvme_ns_head *head);
-void nvme_mpath_add_disk_links(struct nvme_ns *ns);
 void nvme_mpath_remove_disk(struct nvme_ns_head *head);
-void nvme_mpath_remove_disk_links(struct nvme_ns *ns);
 
 static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
@@ -454,12 +452,6 @@ static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
 static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
 }
-static inline void nvme_mpath_add_disk_links(struct nvme_ns *ns)
-{
-}
-static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
-{
-}
 static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
 }

drivers/nvme/host/pci.c

@@ -1153,12 +1153,6 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
 	if (!(csts & NVME_CSTS_CFS) && !nssro)
 		return false;
 
-	/* If PCI error recovery process is happening, we cannot reset or
-	 * the recovery mechanism will surely fail.
-	 */
-	if (pci_channel_offline(to_pci_dev(dev->dev)))
-		return false;
-
 	return true;
 }
 
@@ -1189,6 +1183,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	struct nvme_command cmd;
 	u32 csts = readl(dev->bar + NVME_REG_CSTS);
 
+	/* If PCI error recovery process is happening, we cannot reset or
+	 * the recovery mechanism will surely fail.
+	 */
+	mb();
+	if (pci_channel_offline(to_pci_dev(dev->dev)))
+		return BLK_EH_RESET_TIMER;
+
 	/*
 	 * Reset immediately if the controller is failed
 	 */
@@ -1913,7 +1914,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	int result, nr_io_queues;
 	unsigned long size;
 
-	nr_io_queues = num_present_cpus();
+	nr_io_queues = num_possible_cpus();
 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
 	if (result < 0)
 		return result;
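
The num_possible_cpus() change matters because managed IRQ affinity spreads vectors across all possible CPUs, not just those present at boot. A hypothetical kernel-style fragment, not taken from this diff, sketching the allocation pattern the new count feeds into:

#include <linux/pci.h>
#include <linux/cpumask.h>

/*
 * Hypothetical helper for illustration only. With PCI_IRQ_AFFINITY the
 * IRQ core pre-assigns vectors across every possible CPU, so the upper
 * bound should be num_possible_cpus(); sizing it by num_present_cpus()
 * can leave later-onlined CPUs sharing vectors and skew the initial
 * queue affinity.
 */
static int example_alloc_io_vectors(struct pci_dev *pdev)
{
	int nr_io_queues = num_possible_cpus();

	return pci_alloc_irq_vectors(pdev, 1, nr_io_queues,
				     PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
}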