diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index aaeb82ed2852..4899ebe767c8 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -830,6 +830,11 @@ unsigned dm_table_get_type(struct dm_table *t)
 	return t->type;
 }
 
+bool dm_table_bio_based(struct dm_table *t)
+{
+	return dm_table_get_type(t) == DM_TYPE_BIO_BASED;
+}
+
 bool dm_table_request_based(struct dm_table *t)
 {
 	return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5a843c1f4d64..00c768860818 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1768,7 +1768,6 @@ static struct mapped_device *alloc_dev(int minor)
 	md->queue->backing_dev_info.congested_fn = dm_any_congested;
 	md->queue->backing_dev_info.congested_data = md;
 	blk_queue_make_request(md->queue, dm_request);
-	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
 	md->queue->unplug_fn = dm_unplug_all;
 	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
@@ -2201,6 +2200,16 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
 		goto out;
 	}
 
+	/*
+	 * It is enough that blk_queue_ordered() is called only once when
+	 * the first bio-based table is bound.
+	 *
+	 * This setting should be moved to alloc_dev() when request-based dm
+	 * supports barriers.
+	 */
+	if (!md->map && dm_table_bio_based(table))
+		blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
+
 	__unbind(md);
 	r = __bind(md, table, &limits);
 
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index a7663eba17e2..23278ae80f08 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -61,6 +61,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 int dm_table_any_busy_target(struct dm_table *t);
 int dm_table_set_type(struct dm_table *t);
 unsigned dm_table_get_type(struct dm_table *t);
+bool dm_table_bio_based(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
 int dm_table_alloc_md_mempools(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);
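
The reason blk_queue_ordered() cannot stay in alloc_dev() is that no table is
bound at allocation time, so the device's type (bio-based vs. request-based)
is still unknown there; dm_swap_table() is the first point where the type is
known, hence the one-shot setup guarded by !md->map. Below is a minimal
standalone C model of that first-bind pattern, for illustration only: the
enum values, struct fields, and bind_table() here are simplified stand-ins
rather than kernel definitions. Only the dm_table_bio_based() predicate and
the "set the ordered mode once, at the first bind of a bio-based table" rule
come from the diff above.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum dm_table_type {
	DM_TYPE_NONE,
	DM_TYPE_BIO_BASED,
	DM_TYPE_REQUEST_BASED,
};

struct dm_table {
	enum dm_table_type type;
};

struct mapped_device {
	struct dm_table *map;	/* currently bound table, NULL before first bind */
	bool queue_ordered;	/* models QUEUE_ORDERED_DRAIN having been set */
};

static enum dm_table_type dm_table_get_type(struct dm_table *t)
{
	return t->type;
}

static bool dm_table_bio_based(struct dm_table *t)
{
	return dm_table_get_type(t) == DM_TYPE_BIO_BASED;
}

/*
 * Mirrors the dm_swap_table() hunk: configure the ordered mode only
 * once, when the first bio-based table is bound.  Request-based tables
 * are skipped because request-based dm does not support barriers yet.
 */
static void bind_table(struct mapped_device *md, struct dm_table *t)
{
	if (!md->map && dm_table_bio_based(t))
		md->queue_ordered = true;

	md->map = t;
}

int main(void)
{
	struct mapped_device md = { NULL, false };
	struct dm_table bio_tbl = { DM_TYPE_BIO_BASED };
	struct dm_table rq_tbl = { DM_TYPE_REQUEST_BASED };

	bind_table(&md, &bio_tbl);
	printf("after bio-based bind: ordered=%d\n", md.queue_ordered);	/* 1 */

	/* A later swap does not re-run the one-shot setup. */
	bind_table(&md, &rq_tbl);
	printf("after table swap:     ordered=%d\n", md.queue_ordered);	/* 1 */

	return 0;
}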