for-linus-20180302
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABCAAGBQJamXImAAoJEPfTWPspceCmEP4P/3kkm0JIXtbNZFMb1JZtsjwE
t4OUVEDj4jjRfmZfUVkajPnczM4MSPiXm43PbcOi4NF53mv8k76jyIPhlZREzYzq
MBknibvpqyiWpbii9tBRrR6FGDR/N51//ya9vdPaYBcBssTg6Aqtt4BE5oPfo011
PleGROe1jtrBUNBy2dMy4sHb/MvZ0vRuNPxMsD8Agy5UiVeItAelY/lDn1Hw41BY
O+muE5bw6+yKqB9vGXhV3O4WRh8BofJi1YdzbwbbIzH40ZZK5VTDQc5o19/CFEZ/
uZ8BStOFEWA0LNuarME5fknWcogiedEtszweyiWBbVZo4VqCsfxPoaRCibY/Wg5F
a0UNJ4iSzglhfSMoHJlhvlCAMCyubFSeMSdJjrrpIcyBrziJXpcEXcUnWI43yi4P
FoM8zUni22XnfLWxIdTjVkMRytjtqTLcXOHXdP5N/ESa80jBq3Q76TLmzIKW+kK5
sAre+hgr52NdgovP/NSxsdvsckAolWNe40JI8wLbwNo+lMHr0ckzOG+sAdz1iPRK
iVL0CAlby4A94Wcu+OHCwfY7B9lBrMuMfHsesEM6x1cxgAhd3YNfEJ8g2QolCUEV
KmZizXbV9nnmJfegVC06SgM+D7AR26dwsBG2aoibShuvdxX6jMdUHygyu5DCJdg/
JS+q71jmxb/r1TWe/62r
=AMhV
-----END PGP SIGNATURE-----

Merge tag 'for-linus-20180302' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A collection of fixes for this series. This is a little larger than
  usual at this time, but that's mainly because I was out on vacation
  last week. Nothing in here is major in any way, it's just two weeks
  of fixes. This contains:

   - NVMe pull from Keith, with a set of fixes from the usual suspects.

   - mq-deadline zone unlock fix from Damien, fixing an issue with the
     SMR zone locking added for 4.16.

   - two bcache fixes sent in by Michael, with changes from Coly and
     Tang.

   - comment typo fix from Eric for blktrace.

   - return-value error handling fix for nbd, from Gustavo.

   - fix a direct-io case where we don't defer to a completion handler,
     making us sleep from IRQ device completion. From Jan.

   - a small series from Jan fixing up holes around handling of bdev
     references.

   - small set of regression fixes from Jiufei, mostly fixing problems
     around the gendisk pointer -> partition index change.

   - regression fix from Ming, fixing a boundary issue with the discard
     page cache invalidation.

   - two-patch series from Ming, fixing both a core blk-mq-sched and
     kyber issue around token freeing on a requeue condition"

* tag 'for-linus-20180302' of git://git.kernel.dk/linux-block: (24 commits)
  block: fix a typo
  block: display the correct diskname for bio
  block: fix the count of PGPGOUT for WRITE_SAME
  mq-deadline: Make sure to always unlock zones
  nvmet: fix PSDT field check in command format
  nvme-multipath: fix sysfs dangerously created links
  nbd: fix return value in error handling path
  bcache: fix kcrashes with fio in RAID5 backend dev
  bcache: correct flash only vols (check all uuids)
  blktrace_api.h: fix comment for struct blk_user_trace_setup
  blockdev: Avoid two active bdev inodes for one device
  genhd: Fix BUG in blkdev_open()
  genhd: Fix use after free in __blkdev_get()
  genhd: Add helper put_disk_and_module()
  genhd: Rename get_disk() to get_disk_and_module()
  genhd: Fix leaked module reference for NVME devices
  direct-io: Fix sleep in atomic due to sync AIO
  nvme-pci: Fix nvme queue cleanup if IRQ setup fails
  block: kyber: fix domain token leak during requeue
  blk-mq: don't call io sched's .requeue_request when requeueing rq to ->dispatch
  ...
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -812,7 +812,6 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
         struct gendisk *disk;
         struct request_queue *q;
         struct blkcg_gq *blkg;
-        struct module *owner;
         unsigned int major, minor;
         int key_len, part, ret;
         char *body;
@@ -904,9 +903,7 @@ fail_unlock:
         spin_unlock_irq(q->queue_lock);
         rcu_read_unlock();
 fail:
-        owner = disk->fops->owner;
-        put_disk(disk);
-        module_put(owner);
+        put_disk_and_module(disk);
         /*
          * If queue was bypassing, we should retry. Do so after a
          * short msleep(). It isn't strictly necessary but queue
@@ -931,13 +928,9 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep);
 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
         __releases(ctx->disk->queue->queue_lock) __releases(rcu)
 {
-        struct module *owner;
-
         spin_unlock_irq(ctx->disk->queue->queue_lock);
         rcu_read_unlock();
-        owner = ctx->disk->fops->owner;
-        put_disk(ctx->disk);
-        module_put(owner);
+        put_disk_and_module(ctx->disk);
 }
 EXPORT_SYMBOL_GPL(blkg_conf_finish);
 
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2434,7 +2434,7 @@ blk_qc_t submit_bio(struct bio *bio)
                 unsigned int count;
 
                 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
-                        count = queue_logical_block_size(bio->bi_disk->queue);
+                        count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
                 else
                         count = bio_sectors(bio);
 
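A minimal standalone sketch (illustrative only, not part of the patch above) of the unit conversion the WRITE_SAME accounting fix relies on: the block layer's page-in/page-out counters are kept in 512-byte sectors, so a size expressed in bytes must be shifted right by 9 before it is added to them.

/* Standalone sketch: why the fix above appends ">> 9". */
#include <assert.h>

static unsigned int bytes_to_sectors(unsigned int bytes)
{
        return bytes >> 9;      /* 512 bytes per sector */
}

int main(void)
{
        /* A 4096-byte logical block accounts for 8 sectors, not 4096. */
        assert(bytes_to_sectors(4096) == 8);
        return 0;
}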
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -712,7 +712,6 @@ static void __blk_mq_requeue_request(struct request *rq)
 
         trace_block_rq_requeue(q, rq);
         wbt_requeue(q->rq_wb, &rq->issue_stat);
-        blk_mq_sched_requeue_request(rq);
 
         if (blk_mq_rq_state(rq) != MQ_RQ_IDLE) {
                 blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
@@ -725,6 +724,9 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
 {
         __blk_mq_requeue_request(rq);
 
+        /* this request will be re-inserted to io scheduler queue */
+        blk_mq_sched_requeue_request(rq);
+
         BUG_ON(blk_queued_rq(rq));
         blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
 }
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -547,7 +547,7 @@ static int exact_lock(dev_t devt, void *data)
 {
         struct gendisk *p = data;
 
-        if (!get_disk(p))
+        if (!get_disk_and_module(p))
                 return -1;
         return 0;
 }
@@ -717,6 +717,11 @@ void del_gendisk(struct gendisk *disk)
         blk_integrity_del(disk);
         disk_del_events(disk);
 
+        /*
+         * Block lookups of the disk until all bdevs are unhashed and the
+         * disk is marked as dead (GENHD_FL_UP cleared).
+         */
+        down_write(&disk->lookup_sem);
         /* invalidate stuff */
         disk_part_iter_init(&piter, disk,
                              DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
@@ -731,6 +736,7 @@ void del_gendisk(struct gendisk *disk)
         bdev_unhash_inode(disk_devt(disk));
         set_capacity(disk, 0);
         disk->flags &= ~GENHD_FL_UP;
+        up_write(&disk->lookup_sem);
 
         if (!(disk->flags & GENHD_FL_HIDDEN))
                 sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
@@ -809,16 +815,28 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
 
                 spin_lock_bh(&ext_devt_lock);
                 part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-                if (part && get_disk(part_to_disk(part))) {
+                if (part && get_disk_and_module(part_to_disk(part))) {
                         *partno = part->partno;
                         disk = part_to_disk(part);
                 }
                 spin_unlock_bh(&ext_devt_lock);
         }
 
-        if (disk && unlikely(disk->flags & GENHD_FL_HIDDEN)) {
-                put_disk(disk);
+        if (!disk)
+                return NULL;
+
+        /*
+         * Synchronize with del_gendisk() to not return disk that is being
+         * destroyed.
+         */
+        down_read(&disk->lookup_sem);
+        if (unlikely((disk->flags & GENHD_FL_HIDDEN) ||
+                     !(disk->flags & GENHD_FL_UP))) {
+                up_read(&disk->lookup_sem);
+                put_disk_and_module(disk);
                 disk = NULL;
+        } else {
+                up_read(&disk->lookup_sem);
         }
         return disk;
 }
@@ -1418,6 +1436,7 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
                         kfree(disk);
                         return NULL;
                 }
+                init_rwsem(&disk->lookup_sem);
                 disk->node_id = node_id;
                 if (disk_expand_part_tbl(disk, 0)) {
                         free_part_stats(&disk->part0);
@@ -1453,7 +1472,7 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
 }
 EXPORT_SYMBOL(__alloc_disk_node);
 
-struct kobject *get_disk(struct gendisk *disk)
+struct kobject *get_disk_and_module(struct gendisk *disk)
 {
         struct module *owner;
         struct kobject *kobj;
@@ -1471,17 +1490,30 @@ struct kobject *get_disk(struct gendisk *disk)
         return kobj;
 
 }
 
-EXPORT_SYMBOL(get_disk);
+EXPORT_SYMBOL(get_disk_and_module);
 
 void put_disk(struct gendisk *disk)
 {
         if (disk)
                 kobject_put(&disk_to_dev(disk)->kobj);
 }
 
 EXPORT_SYMBOL(put_disk);
 
+/*
+ * This is a counterpart of get_disk_and_module() and thus also of
+ * get_gendisk().
+ */
+void put_disk_and_module(struct gendisk *disk)
+{
+        if (disk) {
+                struct module *owner = disk->fops->owner;
+
+                put_disk(disk);
+                module_put(owner);
+        }
+}
+EXPORT_SYMBOL(put_disk_and_module);
+
 static void set_disk_ro_uevent(struct gendisk *gd, int ro)
 {
         char event[] = "DISK_RO=1";
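An illustrative caller of the helpers introduced above (a sketch only, building in-kernel against <linux/genhd.h>; error handling trimmed): get_gendisk() now returns with both a gendisk reference and a module reference held, and put_disk_and_module() drops both, replacing the old put_disk() + module_put() pair.

#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/module.h>

/* Sketch: look up a disk by dev_t, use it, drop both references. */
static int example_use_disk(dev_t devt)
{
        int partno;
        struct gendisk *disk = get_gendisk(devt, &partno);

        if (!disk)
                return -ENXIO;

        /* ... use disk->queue, disk->fops, etc. ... */

        put_disk_and_module(disk);      /* pairs with the refs get_gendisk() took */
        return 0;
}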
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -225,7 +225,7 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
 
         if (start + len > i_size_read(bdev->bd_inode))
                 return -EINVAL;
-        truncate_inode_pages_range(mapping, start, start + len);
+        truncate_inode_pages_range(mapping, start, start + len - 1);
         return blkdev_issue_discard(bdev, start >> 9, len >> 9,
                                     GFP_KERNEL, flags);
 }
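The one-character change above exists because truncate_inode_pages_range() takes an inclusive end offset. A standalone sketch of the boundary arithmetic:

#include <assert.h>

int main(void)
{
        unsigned long start = 4096, len = 512;

        /* An inclusive [lstart, lend] range covers lend - lstart + 1 bytes,
         * so a len-byte range starting at start must end at start + len - 1.
         */
        assert((start + len - 1) - start + 1 == len);   /* fixed bound */
        assert((start + len) - start + 1 == len + 1);   /* old bound spilled one byte over */
        return 0;
}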
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -833,6 +833,7 @@ static struct elevator_type kyber_sched = {
                 .limit_depth = kyber_limit_depth,
                 .prepare_request = kyber_prepare_request,
                 .finish_request = kyber_finish_request,
+                .requeue_request = kyber_finish_request,
                 .completed_request = kyber_completed_request,
                 .dispatch_request = kyber_dispatch_request,
                 .has_work = kyber_has_work,
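A standalone sketch of the leak class the one-line kyber change above closes (all names here are made up): if a per-request token taken at prepare time is only returned on the finish path, every requeue leaks one token; wiring the requeue hook to the same release routine keeps the count balanced.

#include <assert.h>

static int tokens_in_use;

static void demo_prepare(void) { tokens_in_use++; }     /* grab a domain token */
static void demo_finish(void)  { tokens_in_use--; }     /* return it */
static void demo_requeue(void) { demo_finish(); }       /* same cleanup as finish */

int main(void)
{
        demo_prepare();
        demo_requeue();         /* bounced back without completing */
        demo_prepare();
        demo_finish();          /* normal completion */
        assert(tokens_in_use == 0);
        return 0;
}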
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -535,13 +535,22 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
         spin_unlock(&dd->lock);
 }
 
+/*
+ * Nothing to do here. This is defined only to ensure that .finish_request
+ * method is called upon request completion.
+ */
+static void dd_prepare_request(struct request *rq, struct bio *bio)
+{
+}
+
 /*
  * For zoned block devices, write unlock the target zone of
  * completed write requests. Do this while holding the zone lock
  * spinlock so that the zone is never unlocked while deadline_fifo_request()
- * while deadline_next_request() are executing.
+ * or deadline_next_request() are executing. This function is called for
+ * all requests, whether or not these requests complete successfully.
  */
-static void dd_completed_request(struct request *rq)
+static void dd_finish_request(struct request *rq)
 {
         struct request_queue *q = rq->q;
 
@@ -756,7 +765,8 @@ static struct elevator_type mq_deadline = {
         .ops.mq = {
                 .insert_requests = dd_insert_requests,
                 .dispatch_request = dd_dispatch_request,
-                .completed_request = dd_completed_request,
+                .prepare_request = dd_prepare_request,
+                .finish_request = dd_finish_request,
                 .next_request = elv_rb_latter_request,
                 .former_request = elv_rb_former_request,
                 .bio_merge = dd_bio_merge,
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -51,6 +51,12 @@ const char *bdevname(struct block_device *bdev, char *buf)
 
 EXPORT_SYMBOL(bdevname);
 
+const char *bio_devname(struct bio *bio, char *buf)
+{
+        return disk_name(bio->bi_disk, bio->bi_partno, buf);
+}
+EXPORT_SYMBOL(bio_devname);
+
 /*
  * There's very little reason to use this, you should really
  * have a struct block_device just about everywhere and use
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1758,7 +1758,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
         if (unit[drive].type->code == FD_NODRIVE)
                 return NULL;
         *part = 0;
-        return get_disk(unit[drive].gendisk);
+        return get_disk_and_module(unit[drive].gendisk);
 }
 
 static int __init amiga_floppy_probe(struct platform_device *pdev)
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1917,7 +1917,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
         if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS)
                 return NULL;
         *part = 0;
-        return get_disk(unit[drive].disk);
+        return get_disk_and_module(unit[drive].disk);
 }
 
 static int __init atari_floppy_init (void)
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -456,7 +456,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
 
         mutex_lock(&brd_devices_mutex);
         brd = brd_init_one(MINOR(dev) / max_part, &new);
-        kobj = brd ? get_disk(brd->brd_disk) : NULL;
+        kobj = brd ? get_disk_and_module(brd->brd_disk) : NULL;
         mutex_unlock(&brd_devices_mutex);
 
         if (new)
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4505,7 +4505,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
         if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type))
                 return NULL;
         *part = 0;
-        return get_disk(disks[drive]);
+        return get_disk_and_module(disks[drive]);
 }
 
 static int __init do_floppy_init(void)
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1922,7 +1922,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
         if (err < 0)
                 kobj = NULL;
         else
-                kobj = get_disk(lo->lo_disk);
+                kobj = get_disk_and_module(lo->lo_disk);
         mutex_unlock(&loop_index_mutex);
 
         *part = 0;
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1591,7 +1591,7 @@ again:
                 if (new_index < 0) {
                         mutex_unlock(&nbd_index_mutex);
                         printk(KERN_ERR "nbd: failed to add new device\n");
-                        return ret;
+                        return new_index;
                 }
                 nbd = idr_find(&nbd_index_idr, new_index);
         }
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1122,7 +1122,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
         pkt->sector = new_sector;
 
         bio_reset(pkt->bio);
-        bio_set_set(pkt->bio, pd->bdev);
+        bio_set_dev(pkt->bio, pd->bdev);
         bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
         pkt->bio->bi_iter.bi_sector = new_sector;
         pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -799,7 +799,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
                 return NULL;
 
         *part = 0;
-        return get_disk(swd->unit[drive].disk);
+        return get_disk_and_module(swd->unit[drive].disk);
 }
 
 static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -332,7 +332,7 @@ static const struct block_device_operations z2_fops =
 static struct kobject *z2_find(dev_t dev, int *part, void *data)
 {
         *part = 0;
-        return get_disk(z2ram_gendisk);
+        return get_disk_and_module(z2ram_gendisk);
 }
 
 static struct request_queue *z2_queue;
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -928,7 +928,7 @@ static int exact_lock(dev_t dev, void *data)
 {
         struct gendisk *p = data;
 
-        if (!get_disk(p))
+        if (!get_disk_and_module(p))
                 return -1;
         return 0;
 }
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -659,11 +659,11 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
 static void search_free(struct closure *cl)
 {
         struct search *s = container_of(cl, struct search, cl);
-        bio_complete(s);
 
         if (s->iop.bio)
                 bio_put(s->iop.bio);
 
+        bio_complete(s);
         closure_debug_destroy(cl);
         mempool_free(s, s->d->c->search);
 }
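The reordering above drops the internal clone before completing the original bio. A generic standalone sketch of that ownership rule (illustrative only, not bcache code): release anything that borrows the parent's memory before signalling the parent's completion, otherwise the memory may be reused while still referenced.

#include <stdlib.h>
#include <string.h>

struct parent   { char *payload; };
struct borrower { struct parent *owner; };

static void put_borrower(struct borrower *b)  { free(b); }
static void complete_parent(struct parent *p) { free(p->payload); free(p); }

int main(void)
{
        struct parent *p = malloc(sizeof(*p));
        struct borrower *b = malloc(sizeof(*b));

        p->payload = strdup("pages shared with the clone");
        b->owner = p;

        put_borrower(b);        /* drop the borrower first ... */
        complete_parent(p);     /* ... then completing (and freeing) is safe */
        return 0;
}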
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1274,7 +1274,7 @@ static int flash_devs_run(struct cache_set *c)
         struct uuid_entry *u;
 
         for (u = c->uuids;
-             u < c->uuids + c->devices_max_used && !ret;
+             u < c->uuids + c->nr_uuids && !ret;
              u++)
                 if (UUID_FLASH_ONLY(u))
                         ret = flash_dev_run(c, u);
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2844,7 +2844,7 @@ out:
 }
 
 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
-                struct nvme_id_ns *id, bool *new)
+                struct nvme_id_ns *id)
 {
         struct nvme_ctrl *ctrl = ns->ctrl;
         bool is_shared = id->nmic & (1 << 0);
@@ -2860,8 +2860,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
                         ret = PTR_ERR(head);
                         goto out_unlock;
                 }
-
-                *new = true;
         } else {
                 struct nvme_ns_ids ids;
 
@@ -2873,8 +2871,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
                         ret = -EINVAL;
                         goto out_unlock;
                 }
-
-                *new = false;
         }
 
         list_add_tail(&ns->siblings, &head->list);
@@ -2945,7 +2941,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
         struct nvme_id_ns *id;
         char disk_name[DISK_NAME_LEN];
         int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT;
-        bool new = true;
 
         ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
         if (!ns)
@@ -2971,7 +2966,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
         if (id->ncap == 0)
                 goto out_free_id;
 
-        if (nvme_init_ns_head(ns, nsid, id, &new))
+        if (nvme_init_ns_head(ns, nsid, id))
                 goto out_free_id;
         nvme_setup_streams_ns(ctrl, ns);
 
@@ -3037,7 +3032,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
                 pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
                         ns->disk->disk_name);
 
-        if (new)
-                nvme_mpath_add_disk(ns->head);
+        nvme_mpath_add_disk(ns->head);
         nvme_mpath_add_disk_links(ns);
         return;
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -493,7 +493,7 @@ EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
  */
 int nvmf_register_transport(struct nvmf_transport_ops *ops)
 {
-        if (!ops->create_ctrl || !ops->module)
+        if (!ops->create_ctrl)
                 return -EINVAL;
 
         down_write(&nvmf_transports_rwsem);
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -198,11 +198,16 @@ void nvme_mpath_add_disk(struct nvme_ns_head *head)
 {
         if (!head->disk)
                 return;
 
-        device_add_disk(&head->subsys->dev, head->disk);
-        if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
-                        &nvme_ns_id_attr_group))
-                pr_warn("%s: failed to create sysfs group for identification\n",
-                        head->disk->disk_name);
+        mutex_lock(&head->subsys->lock);
+        if (!(head->disk->flags & GENHD_FL_UP)) {
+                device_add_disk(&head->subsys->dev, head->disk);
+                if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
+                                &nvme_ns_id_attr_group))
+                        pr_warn("%s: failed to create sysfs group for identification\n",
+                                head->disk->disk_name);
+        }
+        mutex_unlock(&head->subsys->lock);
 }
 
 void nvme_mpath_add_disk_links(struct nvme_ns *ns)
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1459,7 +1459,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
         nvmeq->cq_vector = qid - 1;
         result = adapter_alloc_cq(dev, qid, nvmeq);
         if (result < 0)
-                return result;
+                goto release_vector;
 
         result = adapter_alloc_sq(dev, qid, nvmeq);
         if (result < 0)
@@ -1473,9 +1473,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
         return result;
 
 release_sq:
+        dev->online_queues--;
         adapter_delete_sq(dev, qid);
 release_cq:
         adapter_delete_cq(dev, qid);
+release_vector:
+        nvmeq->cq_vector = -1;
         return result;
 }
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1051,7 +1051,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
         struct nvme_rdma_device *dev = queue->device;
         struct ib_device *ibdev = dev->dev;
 
-        if (!blk_rq_bytes(rq))
+        if (!blk_rq_payload_bytes(rq))
                 return;
 
         if (req->mr) {
@@ -1166,7 +1166,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
         c->common.flags |= NVME_CMD_SGL_METABUF;
 
-        if (!blk_rq_bytes(rq))
+        if (!blk_rq_payload_bytes(rq))
                 return nvme_rdma_set_sg_null(c);
 
         req->sg_table.sgl = req->first_sgl;
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -520,9 +520,12 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
                 goto fail;
         }
 
-        /* either variant of SGLs is fine, as we don't support metadata */
-        if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
-                     (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
+        /*
+         * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
+         * contains an address of a single contiguous physical buffer that is
+         * byte aligned.
+         */
+        if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
                 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                 goto fail;
         }
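A standalone sketch of the PSDT check tightened above. The two PSDT bits sit in the command's flags byte; the constants below mirror the kernel's include/linux/nvme.h values and should be verified against the tree in use.

#include <assert.h>
#include <stdint.h>

#define DEMO_CMD_SGL_METABUF    (1 << 6)        /* SGL for data, MPTR is a contiguous buffer */
#define DEMO_CMD_SGL_METASEG    (1 << 7)        /* SGL for data, MPTR points to an SGL segment */
#define DEMO_CMD_SGL_ALL        (DEMO_CMD_SGL_METABUF | DEMO_CMD_SGL_METASEG)

/* Fabrics commands must use the METABUF variant only. */
static int psdt_valid_for_fabrics(uint8_t flags)
{
        return (flags & DEMO_CMD_SGL_ALL) == DEMO_CMD_SGL_METABUF;
}

int main(void)
{
        assert(psdt_valid_for_fabrics(DEMO_CMD_SGL_METABUF));
        assert(!psdt_valid_for_fabrics(0));                     /* PRPs: not allowed on fabrics */
        assert(!psdt_valid_for_fabrics(DEMO_CMD_SGL_METASEG));  /* now rejected as well */
        return 0;
}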
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -184,7 +184,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                 return BLK_STS_OK;
         }
 
-        if (blk_rq_bytes(req)) {
+        if (blk_rq_payload_bytes(req)) {
                 iod->sg_table.sgl = iod->first_sgl;
                 if (sg_alloc_table_chained(&iod->sg_table,
                                 blk_rq_nr_phys_segments(req),
@@ -193,7 +193,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
                 iod->req.sg = iod->sg_table.sgl;
                 iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
-                iod->req.transfer_len = blk_rq_bytes(req);
+                iod->req.transfer_len = blk_rq_payload_bytes(req);
         }
 
         blk_mq_start_request(req);
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1058,6 +1058,27 @@ retry:
         return 0;
 }
 
+static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno)
+{
+        struct gendisk *disk = get_gendisk(bdev->bd_dev, partno);
+
+        if (!disk)
+                return NULL;
+        /*
+         * Now that we hold gendisk reference we make sure bdev we looked up is
+         * not stale. If it is, it means device got removed and created before
+         * we looked up gendisk and we fail open in such case. Associating
+         * unhashed bdev with newly created gendisk could lead to two bdevs
+         * (and thus two independent caches) being associated with one device
+         * which is bad.
+         */
+        if (inode_unhashed(bdev->bd_inode)) {
+                put_disk_and_module(disk);
+                return NULL;
+        }
+        return disk;
+}
+
 /**
  * bd_start_claiming - start claiming a block device
  * @bdev: block device of interest
@@ -1094,7 +1115,7 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
          * @bdev might not have been initialized properly yet, look up
          * and grab the outer block device the hard way.
          */
-        disk = get_gendisk(bdev->bd_dev, &partno);
+        disk = bdev_get_gendisk(bdev, &partno);
         if (!disk)
                 return ERR_PTR(-ENXIO);
 
@@ -1111,8 +1132,7 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
         else
                 whole = bdgrab(bdev);
 
-        module_put(disk->fops->owner);
-        put_disk(disk);
+        put_disk_and_module(disk);
         if (!whole)
                 return ERR_PTR(-ENOMEM);
 
@@ -1407,10 +1427,10 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
 static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 {
         struct gendisk *disk;
-        struct module *owner;
         int ret;
         int partno;
         int perm = 0;
+        bool first_open = false;
 
         if (mode & FMODE_READ)
                 perm |= MAY_READ;
@@ -1430,14 +1450,14 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 restart:
 
         ret = -ENXIO;
-        disk = get_gendisk(bdev->bd_dev, &partno);
+        disk = bdev_get_gendisk(bdev, &partno);
         if (!disk)
                 goto out;
-        owner = disk->fops->owner;
 
         disk_block_events(disk);
         mutex_lock_nested(&bdev->bd_mutex, for_part);
         if (!bdev->bd_openers) {
+                first_open = true;
                 bdev->bd_disk = disk;
                 bdev->bd_queue = disk->queue;
                 bdev->bd_contains = bdev;
@@ -1463,8 +1483,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                                 bdev->bd_queue = NULL;
                                 mutex_unlock(&bdev->bd_mutex);
                                 disk_unblock_events(disk);
-                                put_disk(disk);
-                                module_put(owner);
+                                put_disk_and_module(disk);
                                 goto restart;
                         }
                 }
@@ -1524,15 +1543,15 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                         if (ret)
                                 goto out_unlock_bdev;
                 }
-                /* only one opener holds refs to the module and disk */
-                put_disk(disk);
-                module_put(owner);
         }
         bdev->bd_openers++;
         if (for_part)
                 bdev->bd_part_count++;
         mutex_unlock(&bdev->bd_mutex);
         disk_unblock_events(disk);
+        /* only one opener holds refs to the module and disk */
+        if (!first_open)
+                put_disk_and_module(disk);
         return 0;
 
 out_clear:
@@ -1546,8 +1565,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 out_unlock_bdev:
         mutex_unlock(&bdev->bd_mutex);
         disk_unblock_events(disk);
-        put_disk(disk);
-        module_put(owner);
+        put_disk_and_module(disk);
 out:
         bdput(bdev);
 
@@ -1770,8 +1788,6 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                 disk->fops->release(disk, mode);
         }
         if (!bdev->bd_openers) {
-                struct module *owner = disk->fops->owner;
-
                 disk_put_part(bdev->bd_part);
                 bdev->bd_part = NULL;
                 bdev->bd_disk = NULL;
@@ -1779,8 +1795,7 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                 victim = bdev->bd_contains;
                 bdev->bd_contains = NULL;
 
-                put_disk(disk);
-                module_put(owner);
+                put_disk_and_module(disk);
         }
         mutex_unlock(&bdev->bd_mutex);
         bdput(bdev);
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1274,8 +1274,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
          */
         if (dio->is_async && iov_iter_rw(iter) == WRITE) {
                 retval = 0;
-                if ((iocb->ki_filp->f_flags & O_DSYNC) ||
-                    IS_SYNC(iocb->ki_filp->f_mapping->host))
+                if (iocb->ki_flags & IOCB_DSYNC)
                         retval = dio_set_defer_completion(dio);
                 else if (!dio->inode->i_sb->s_dio_done_wq) {
                         /*
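The hunk above keys deferred completion off IOCB_DSYNC because the sync work done at I/O completion can sleep, while the completion itself may arrive in IRQ context. A kernel-style sketch of that hand-off pattern (illustrative names only, not the fs/direct-io.c code):

#include <linux/workqueue.h>

struct demo_io {
        struct work_struct work;
        /* ... state needed to finish the I/O ... */
};

static void demo_finish_in_process_ctx(struct work_struct *work)
{
        struct demo_io *io = container_of(work, struct demo_io, work);

        /* Safe to sleep here, e.g. syncing metadata before signalling waiters. */
        (void)io;
}

/* Called from the device completion path, possibly in IRQ context. */
static void demo_io_done(struct demo_io *io, bool needs_sync)
{
        if (needs_sync) {
                INIT_WORK(&io->work, demo_finish_in_process_ctx);
                schedule_work(&io->work);       /* defer; never sleep here */
        }
        /* otherwise the I/O can be completed inline */
}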
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -511,6 +511,7 @@ void zero_fill_bio(struct bio *bio);
 extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
 extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
+extern const char *bio_devname(struct bio *bio, char *buffer);
 
 #define bio_set_dev(bio, bdev) \
 do { \
@@ -529,9 +530,6 @@ do { \
 #define bio_dev(bio) \
         disk_devt((bio)->bi_disk)
 
-#define bio_devname(bio, buf) \
-        __bdevname(bio_dev(bio), (buf))
-
 #ifdef CONFIG_BLK_CGROUP
 int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
 void bio_disassociate_task(struct bio *bio);
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -198,6 +198,7 @@ struct gendisk {
         void *private_data;
 
         int flags;
+        struct rw_semaphore lookup_sem;
         struct kobject *slave_dir;
 
         struct timer_rand_state *random;
@@ -600,8 +601,9 @@ extern void delete_partition(struct gendisk *, int);
 extern void printk_all_partitions(void);
 
 extern struct gendisk *__alloc_disk_node(int minors, int node_id);
-extern struct kobject *get_disk(struct gendisk *disk);
+extern struct kobject *get_disk_and_module(struct gendisk *disk);
 extern void put_disk(struct gendisk *disk);
+extern void put_disk_and_module(struct gendisk *disk);
 extern void blk_register_region(dev_t devt, unsigned long range,
                         struct module *module,
                         struct kobject *(*probe)(dev_t, int *, void *),
--- a/include/uapi/linux/blktrace_api.h
+++ b/include/uapi/linux/blktrace_api.h
@@ -131,7 +131,7 @@ enum {
 #define BLKTRACE_BDEV_SIZE      32
 
 /*
- * User setup structure passed with BLKTRACESTART
+ * User setup structure passed with BLKTRACESETUP
  */
 struct blk_user_trace_setup {
         char name[BLKTRACE_BDEV_SIZE];  /* output */