Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm
* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
  dm kcopyd: return client directly and not through a pointer
  dm kcopyd: reserve fewer pages
  dm io: use fixed initial mempool size
  dm kcopyd: alloc pages from the main page allocator
  dm kcopyd: add gfp parm to alloc_pl
  dm kcopyd: remove superfluous page allocation spinlock
  dm kcopyd: preallocate sub jobs to avoid deadlock
  dm kcopyd: avoid pointless job splitting
  dm mpath: do not fail paths after integrity errors
  dm table: reject devices without request fns
  dm table: allow targets to support discards internally
commit b11b06d90a
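The common thread in this series is the removal of client-sizing parameters: dm_io_client_create() and dm_kcopyd_client_create() no longer take a page count, and dm_kcopyd_client_create() now returns the client itself (ERR_PTR-encoded on failure) instead of an error code plus out-parameter. A minimal before/after sketch of a caller conversion, using a hypothetical target context tc and page count NR_PAGES:

	/* Before: int return, client through an out-parameter. */
	r = dm_kcopyd_client_create(NR_PAGES, &tc->kcopyd_client);
	if (r)
		goto bad;

	/* After: client returned directly, checked with IS_ERR(). */
	tc->kcopyd_client = dm_kcopyd_client_create();
	if (IS_ERR(tc->kcopyd_client)) {
		r = PTR_ERR(tc->kcopyd_client);
		goto bad;
	}

The same pattern is applied below to the dm-raid1 and dm-snap callers, and analogously to the dm-log and dm-snap-persistent users of dm_io_client_create().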
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -19,6 +19,8 @@
 #define DM_MSG_PREFIX "io"
 
 #define DM_IO_MAX_REGIONS BITS_PER_LONG
+#define MIN_IOS 16
+#define MIN_BIOS 16
 
 struct dm_io_client {
 	mempool_t *pool;
@@ -40,34 +42,22 @@ struct io {
 
 static struct kmem_cache *_dm_io_cache;
 
-/*
- * io contexts are only dynamically allocated for asynchronous
- * io. Since async io is likely to be the majority of io we'll
- * have the same number of io contexts as bios! (FIXME: must reduce this).
- */
-
-static unsigned int pages_to_ios(unsigned int pages)
-{
-	return 4 * pages;	/* too many ? */
-}
-
 /*
  * Create a client with mempool and bioset.
  */
-struct dm_io_client *dm_io_client_create(unsigned num_pages)
+struct dm_io_client *dm_io_client_create(void)
 {
-	unsigned ios = pages_to_ios(num_pages);
 	struct dm_io_client *client;
 
 	client = kmalloc(sizeof(*client), GFP_KERNEL);
 	if (!client)
 		return ERR_PTR(-ENOMEM);
 
-	client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
+	client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
 	if (!client->pool)
 		goto bad;
 
-	client->bios = bioset_create(16, 0);
+	client->bios = bioset_create(MIN_BIOS, 0);
 	if (!client->bios)
 		goto bad;
 
@@ -81,13 +71,6 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
 }
 EXPORT_SYMBOL(dm_io_client_create);
 
-int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
-{
-	return mempool_resize(client->pool, pages_to_ios(num_pages),
-			      GFP_KERNEL);
-}
-EXPORT_SYMBOL(dm_io_client_resize);
-
 void dm_io_client_destroy(struct dm_io_client *client)
 {
 	mempool_destroy(client->pool);
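Since the mempool and bioset are now created at fixed initial sizes (MIN_IOS/MIN_BIOS) and grow on demand, callers no longer estimate page counts up front, and dm_io_client_resize() has no remaining users. A sketch of the resulting caller pattern (the surrounding target code is hypothetical):

	struct dm_io_client *client;

	client = dm_io_client_create();
	if (IS_ERR(client))
		return PTR_ERR(client);	/* ERR_PTR convention, never NULL */

	/* ... issue requests with io_req.client = client ... */

	dm_io_client_destroy(client);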
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -27,15 +27,19 @@
 
 #include "dm.h"
 
+#define SUB_JOB_SIZE	128
+#define SPLIT_COUNT	8
+#define MIN_JOBS	8
+#define RESERVE_PAGES	(DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE))
+
 /*-----------------------------------------------------------------
  * Each kcopyd client has its own little pool of preallocated
  * pages for kcopyd io.
  *---------------------------------------------------------------*/
 struct dm_kcopyd_client {
 	spinlock_t lock;
 	struct page_list *pages;
-	unsigned int nr_pages;
-	unsigned int nr_free_pages;
+	unsigned nr_reserved_pages;
+	unsigned nr_free_pages;
 
 	struct dm_io_client *io_client;
 
@@ -67,15 +71,18 @@ static void wake(struct dm_kcopyd_client *kc)
 	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
 }
 
-static struct page_list *alloc_pl(void)
+/*
+ * Obtain one page for the use of kcopyd.
+ */
+static struct page_list *alloc_pl(gfp_t gfp)
 {
 	struct page_list *pl;
 
-	pl = kmalloc(sizeof(*pl), GFP_KERNEL);
+	pl = kmalloc(sizeof(*pl), gfp);
 	if (!pl)
 		return NULL;
 
-	pl->page = alloc_page(GFP_KERNEL);
+	pl->page = alloc_page(gfp);
 	if (!pl->page) {
 		kfree(pl);
 		return NULL;
@@ -90,41 +97,56 @@ static void free_pl(struct page_list *pl)
 	kfree(pl);
 }
 
+/*
+ * Add the provided pages to a client's free page list, releasing
+ * back to the system any beyond the reserved_pages limit.
+ */
+static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
+{
+	struct page_list *next;
+
+	do {
+		next = pl->next;
+
+		if (kc->nr_free_pages >= kc->nr_reserved_pages)
+			free_pl(pl);
+		else {
+			pl->next = kc->pages;
+			kc->pages = pl;
+			kc->nr_free_pages++;
+		}
+
+		pl = next;
+	} while (pl);
+}
+
 static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
 			    unsigned int nr, struct page_list **pages)
 {
 	struct page_list *pl;
 
-	spin_lock(&kc->lock);
-	if (kc->nr_free_pages < nr) {
-		spin_unlock(&kc->lock);
-		return -ENOMEM;
-	}
-
-	kc->nr_free_pages -= nr;
-	for (*pages = pl = kc->pages; --nr; pl = pl->next)
-		;
-
-	kc->pages = pl->next;
-	pl->next = NULL;
-
-	spin_unlock(&kc->lock);
-
-	return 0;
-}
-
-static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
-{
-	struct page_list *cursor;
-
-	spin_lock(&kc->lock);
-	for (cursor = pl; cursor->next; cursor = cursor->next)
-		kc->nr_free_pages++;
-
-	kc->nr_free_pages++;
-	cursor->next = kc->pages;
-	kc->pages = pl;
-	spin_unlock(&kc->lock);
+	*pages = NULL;
+
+	do {
+		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY);
+		if (unlikely(!pl)) {
+			/* Use reserved pages */
+			pl = kc->pages;
+			if (unlikely(!pl))
+				goto out_of_memory;
+			kc->pages = pl->next;
+			kc->nr_free_pages--;
+		}
+		pl->next = *pages;
+		*pages = pl;
+	} while (--nr);
+
+	return 0;
+
+out_of_memory:
+	if (*pages)
+		kcopyd_put_pages(kc, *pages);
+
+	return -ENOMEM;
 }
 
 /*
@@ -141,13 +163,16 @@ static void drop_pages(struct page_list *pl)
 	}
 }
 
-static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
+/*
+ * Allocate and reserve nr_pages for the use of a specific client.
+ */
+static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
 {
-	unsigned int i;
+	unsigned i;
 	struct page_list *pl = NULL, *next;
 
-	for (i = 0; i < nr; i++) {
-		next = alloc_pl();
+	for (i = 0; i < nr_pages; i++) {
+		next = alloc_pl(GFP_KERNEL);
 		if (!next) {
 			if (pl)
 				drop_pages(pl);
@@ -157,17 +182,18 @@ static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
 		pl = next;
 	}
 
+	kc->nr_reserved_pages += nr_pages;
 	kcopyd_put_pages(kc, pl);
-	kc->nr_pages += nr;
 
 	return 0;
 }
 
 static void client_free_pages(struct dm_kcopyd_client *kc)
 {
-	BUG_ON(kc->nr_free_pages != kc->nr_pages);
+	BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages);
 	drop_pages(kc->pages);
 	kc->pages = NULL;
-	kc->nr_free_pages = kc->nr_pages = 0;
+	kc->nr_free_pages = kc->nr_reserved_pages = 0;
 }
 
 /*-----------------------------------------------------------------
@@ -216,16 +242,17 @@ struct kcopyd_job {
 	struct mutex lock;
 	atomic_t sub_jobs;
 	sector_t progress;
-};
 
-/* FIXME: this should scale with the number of pages */
-#define MIN_JOBS 512
+	struct kcopyd_job *master_job;
+};
 
 static struct kmem_cache *_job_cache;
 
 int __init dm_kcopyd_init(void)
 {
-	_job_cache = KMEM_CACHE(kcopyd_job, 0);
+	_job_cache = kmem_cache_create("kcopyd_job",
+				sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1),
+				__alignof__(struct kcopyd_job), 0, NULL);
 	if (!_job_cache)
 		return -ENOMEM;
 
@@ -299,7 +326,12 @@ static int run_complete_job(struct kcopyd_job *job)
 
 	if (job->pages)
 		kcopyd_put_pages(kc, job->pages);
-	mempool_free(job, kc->job_pool);
+	/*
+	 * If this is the master job, the sub jobs have already
+	 * completed so we can free everything.
+	 */
+	if (job->master_job == job)
+		mempool_free(job, kc->job_pool);
 	fn(read_err, write_err, context);
 
 	if (atomic_dec_and_test(&kc->nr_jobs))
@@ -460,14 +492,14 @@ static void dispatch_job(struct kcopyd_job *job)
 	wake(kc);
 }
 
-#define SUB_JOB_SIZE 128
 static void segment_complete(int read_err, unsigned long write_err,
 			     void *context)
 {
 	/* FIXME: tidy this function */
 	sector_t progress = 0;
 	sector_t count = 0;
-	struct kcopyd_job *job = (struct kcopyd_job *) context;
+	struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
+	struct kcopyd_job *job = sub_job->master_job;
 	struct dm_kcopyd_client *kc = job->kc;
 
 	mutex_lock(&job->lock);
@@ -498,8 +530,6 @@ static void segment_complete(int read_err, unsigned long write_err,
 
 	if (count) {
 		int i;
-		struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
-							   GFP_NOIO);
 
 		*sub_job = *job;
 		sub_job->source.sector += progress;
@@ -511,7 +541,7 @@ static void segment_complete(int read_err, unsigned long write_err,
 		}
 
 		sub_job->fn = segment_complete;
-		sub_job->context = job;
+		sub_job->context = sub_job;
 		dispatch_job(sub_job);
 
 	} else if (atomic_dec_and_test(&job->sub_jobs)) {
@@ -531,19 +561,19 @@ static void segment_complete(int read_err, unsigned long write_err,
 }
 
 /*
- * Create some little jobs that will do the move between
- * them.
+ * Create some sub jobs to share the work between them.
  */
-#define SPLIT_COUNT 8
-static void split_job(struct kcopyd_job *job)
+static void split_job(struct kcopyd_job *master_job)
 {
 	int i;
 
-	atomic_inc(&job->kc->nr_jobs);
+	atomic_inc(&master_job->kc->nr_jobs);
 
-	atomic_set(&job->sub_jobs, SPLIT_COUNT);
-	for (i = 0; i < SPLIT_COUNT; i++)
-		segment_complete(0, 0u, job);
+	atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
+	for (i = 0; i < SPLIT_COUNT; i++) {
+		master_job[i + 1].master_job = master_job;
+		segment_complete(0, 0u, &master_job[i + 1]);
+	}
 }
 
 int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
@@ -553,7 +583,8 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
 	struct kcopyd_job *job;
 
 	/*
-	 * Allocate a new job.
+	 * Allocate an array of jobs consisting of one master job
+	 * followed by SPLIT_COUNT sub jobs.
 	 */
 	job = mempool_alloc(kc->job_pool, GFP_NOIO);
 
@@ -577,10 +608,10 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
 
 	job->fn = fn;
 	job->context = context;
+	job->master_job = job;
 
-	if (job->source.count < SUB_JOB_SIZE)
+	if (job->source.count <= SUB_JOB_SIZE)
 		dispatch_job(job);
-
 	else {
 		mutex_init(&job->lock);
 		job->progress = 0;
@@ -606,17 +637,15 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
 /*-----------------------------------------------------------------
 * Client setup
 *---------------------------------------------------------------*/
-int dm_kcopyd_client_create(unsigned int nr_pages,
-			    struct dm_kcopyd_client **result)
+struct dm_kcopyd_client *dm_kcopyd_client_create(void)
 {
 	int r = -ENOMEM;
 	struct dm_kcopyd_client *kc;
 
 	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
 	if (!kc)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	spin_lock_init(&kc->lock);
 	spin_lock_init(&kc->job_lock);
 	INIT_LIST_HEAD(&kc->complete_jobs);
 	INIT_LIST_HEAD(&kc->io_jobs);
@@ -633,12 +662,12 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
 		goto bad_workqueue;
 
 	kc->pages = NULL;
-	kc->nr_pages = kc->nr_free_pages = 0;
-	r = client_alloc_pages(kc, nr_pages);
+	kc->nr_reserved_pages = kc->nr_free_pages = 0;
+	r = client_reserve_pages(kc, RESERVE_PAGES);
 	if (r)
 		goto bad_client_pages;
 
-	kc->io_client = dm_io_client_create(nr_pages);
+	kc->io_client = dm_io_client_create();
 	if (IS_ERR(kc->io_client)) {
 		r = PTR_ERR(kc->io_client);
 		goto bad_io_client;
@@ -647,8 +676,7 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
 	init_waitqueue_head(&kc->destroyq);
 	atomic_set(&kc->nr_jobs, 0);
 
-	*result = kc;
-	return 0;
+	return kc;
 
 bad_io_client:
 	client_free_pages(kc);
@@ -659,7 +687,7 @@ bad_workqueue:
 bad_slab:
 	kfree(kc);
 
-	return r;
+	return ERR_PTR(r);
 }
 EXPORT_SYMBOL(dm_kcopyd_client_create);
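Two structural changes above are worth a gloss. First, kcopyd now takes pages from the main page allocator with __GFP_NOWARN | __GFP_NORETRY and falls back to a small per-client reserve sized for a single sub job, so the old spinlock-protected page cache is gone. Second, each job allocation is an array of one master job followed by SPLIT_COUNT sub jobs, so splitting never calls mempool_alloc from the completion path (the old deadlock risk). A standalone arithmetic sketch, assuming 512-byte sectors and 4 KiB pages:

	#include <stdio.h>

	#define SECTOR_SHIFT	9
	#define PAGE_SIZE	4096	/* assumption: 4 KiB pages */
	#define SUB_JOB_SIZE	128	/* sectors, as in dm-kcopyd.c */
	#define SPLIT_COUNT	8
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* Reserve: one sub job's worth of data per client. */
		printf("RESERVE_PAGES = %d\n",
		       DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE)); /* 16 */

		/* Slab object: a master job plus its preallocated sub jobs. */
		printf("jobs per allocation = %d\n", SPLIT_COUNT + 1); /* 9 */
		return 0;
	}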
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -449,8 +449,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
 
 	lc->io_req.mem.type = DM_IO_VMA;
 	lc->io_req.notify.fn = NULL;
-	lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
-							   PAGE_SIZE));
+	lc->io_req.client = dm_io_client_create();
 	if (IS_ERR(lc->io_req.client)) {
 		r = PTR_ERR(lc->io_req.client);
 		DMWARN("couldn't allocate disk io client");
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1290,7 +1290,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
 	if (!error && !clone->errors)
 		return 0;	/* I/O complete */
 
-	if (error == -EOPNOTSUPP || error == -EREMOTEIO)
+	if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
 		return error;
 
 	if (mpio->pgpath)
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -22,8 +22,6 @@
 #define DM_MSG_PREFIX "raid1"
 
 #define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
-#define DM_IO_PAGES 64
-#define DM_KCOPYD_PAGES 64
 
 #define DM_RAID1_HANDLE_ERRORS 0x01
 #define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -887,7 +885,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
 		return NULL;
 	}
 
-	ms->io_client = dm_io_client_create(DM_IO_PAGES);
+	ms->io_client = dm_io_client_create();
 	if (IS_ERR(ms->io_client)) {
 		ti->error = "Error creating dm_io client";
 		mempool_destroy(ms->read_record_pool);
@@ -1117,9 +1115,11 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto err_destroy_wq;
 	}
 
-	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
-	if (r)
+	ms->kcopyd_client = dm_kcopyd_client_create();
+	if (IS_ERR(ms->kcopyd_client)) {
+		r = PTR_ERR(ms->kcopyd_client);
 		goto err_destroy_wq;
+	}
 
 	wakeup_mirrord(ms);
 	return 0;
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -154,11 +154,6 @@ struct pstore {
 	struct workqueue_struct *metadata_wq;
 };
 
-static unsigned sectors_to_pages(unsigned sectors)
-{
-	return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
-}
-
 static int alloc_area(struct pstore *ps)
 {
 	int r = -ENOMEM;
@@ -318,8 +313,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 		chunk_size_supplied = 0;
 	}
 
-	ps->io_client = dm_io_client_create(sectors_to_pages(ps->store->
-							     chunk_size));
+	ps->io_client = dm_io_client_create();
 	if (IS_ERR(ps->io_client))
 		return PTR_ERR(ps->io_client);
 
@@ -368,11 +362,6 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 		return r;
 	}
 
-	r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
-				ps->io_client);
-	if (r)
-		return r;
-
 	r = alloc_area(ps);
 	return r;
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -39,11 +39,6 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
  */
 #define SNAPSHOT_COPY_PRIORITY 2
 
-/*
- * Reserve 1MB for each snapshot initially (with minimum of 1 page).
- */
-#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
-
 /*
  * The size of the mempool used to track chunks in use.
  */
@@ -1116,8 +1111,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad_hash_tables;
 	}
 
-	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
-	if (r) {
+	s->kcopyd_client = dm_kcopyd_client_create();
+	if (IS_ERR(s->kcopyd_client)) {
+		r = PTR_ERR(s->kcopyd_client);
 		ti->error = "Could not create kcopyd client";
 		goto bad_kcopyd;
 	}
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -362,6 +362,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 				  sector_t start, sector_t len, void *data)
 {
+	struct request_queue *q;
 	struct queue_limits *limits = data;
 	struct block_device *bdev = dev->bdev;
 	sector_t dev_size =
@@ -370,6 +371,22 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 		limits->logical_block_size >> SECTOR_SHIFT;
 	char b[BDEVNAME_SIZE];
 
+	/*
+	 * Some devices exist without request functions,
+	 * such as loop devices not yet bound to backing files.
+	 * Forbid the use of such devices.
+	 */
+	q = bdev_get_queue(bdev);
+	if (!q || !q->make_request_fn) {
+		DMWARN("%s: %s is not yet initialised: "
+		       "start=%llu, len=%llu, dev_size=%llu",
+		       dm_device_name(ti->table->md), bdevname(bdev, b),
+		       (unsigned long long)start,
+		       (unsigned long long)len,
+		       (unsigned long long)dev_size);
+		return 1;
+	}
+
 	if (!dev_size)
 		return 0;
 
@@ -1346,7 +1363,8 @@ bool dm_table_supports_discards(struct dm_table *t)
 		return 0;
 
 	/*
-	 * Ensure that at least one underlying device supports discards.
+	 * Unless any target used by the table set discards_supported,
+	 * require at least one underlying device to support discards.
 	 * t->devices includes internal dm devices such as mirror logs
 	 * so we need to use iterate_devices here, which targets
 	 * supporting discard must provide.
@@ -1354,6 +1372,9 @@ bool dm_table_supports_discards(struct dm_table *t)
 	while (i < dm_table_get_num_targets(t)) {
 		ti = dm_table_get_target(t, i++);
 
+		if (ti->discards_supported)
+			return 1;
+
 		if (ti->type->iterate_devices &&
 		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
 			return 1;
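dm_table_supports_discards() and device_area_is_invalid() both reach the underlying devices through each target's iterate_devices method. For a single-device target the method is typically one call handing the device to the callout; a sketch modelled on the linear target, where my_ctx is a hypothetical private structure:

	static int my_iterate_devices(struct dm_target *ti,
				      iterate_devices_callout_fn fn, void *data)
	{
		struct my_ctx *mc = ti->private;	/* hypothetical context */

		/* Pass the single underlying device to the callout,
		 * e.g. device_discard_capable() above. */
		return fn(ti, mc->dev, mc->start, ti->len, data);
	}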
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -191,6 +191,12 @@ struct dm_target {
 
 	/* Used to provide an error string from the ctr */
 	char *error;
+
+	/*
+	 * Set if this target needs to receive discards regardless of
+	 * whether or not its underlying devices have support.
+	 */
+	unsigned discards_supported:1;
 };
 
 /* Each target can link one of these into the table */
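The new flag is for targets that consume discards themselves rather than passing them down to capable devices; without it, dm_table_supports_discards() would disable discards for the whole table whenever no underlying device advertises support. A hypothetical constructor fragment:

	static int my_ctr(struct dm_target *ti, unsigned argc, char **argv)
	{
		/* ... parse arguments, allocate context ... */

		/* Receive discards even if no underlying device
		 * advertises support; this target handles them itself. */
		ti->discards_supported = 1;

		return 0;
	}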
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -69,8 +69,7 @@ struct dm_io_request {
  *
  * Create/destroy may block.
  */
-struct dm_io_client *dm_io_client_create(unsigned num_pages);
-int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client);
+struct dm_io_client *dm_io_client_create(void);
 void dm_io_client_destroy(struct dm_io_client *client);
 
 /*
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -25,8 +25,7 @@
  * To use kcopyd you must first create a dm_kcopyd_client object.
  */
 struct dm_kcopyd_client;
-int dm_kcopyd_client_create(unsigned num_pages,
-			    struct dm_kcopyd_client **result);
+struct dm_kcopyd_client *dm_kcopyd_client_create(void);
 void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
 
 /*
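Putting the revised interface together, a kcopyd client's lifecycle now looks like the sketch below. dm_kcopyd_copy() itself is unchanged by this series apart from the internal splitting; copy_done and context stand in for the caller's dm_kcopyd_notify_fn and private data:

	struct dm_kcopyd_client *kc;

	kc = dm_kcopyd_client_create();		/* no page count argument */
	if (IS_ERR(kc))
		return PTR_ERR(kc);

	/* One source region copied to one destination region. */
	dm_kcopyd_copy(kc, &from, 1, &dest, 0, copy_done, context);

	/* ... after all notify callbacks have fired ... */
	dm_kcopyd_client_destroy(kc);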