diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
index ccbe852d5362..2816018faa7f 100644
--- a/drivers/md/dm-cache-policy-internal.h
+++ b/drivers/md/dm-cache-policy-internal.h
@@ -83,10 +83,10 @@ static inline dm_cblock_t policy_residency(struct dm_cache_policy *p)
 	return p->residency(p);
 }
 
-static inline void policy_tick(struct dm_cache_policy *p)
+static inline void policy_tick(struct dm_cache_policy *p, bool can_block)
 {
 	if (p->tick)
-		return p->tick(p);
+		return p->tick(p, can_block);
 }
 
 static inline int policy_emit_config_values(struct dm_cache_policy *p, char *result,
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 084eec653321..838665bb495a 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -1283,7 +1283,7 @@ static dm_cblock_t mq_residency(struct dm_cache_policy *p)
 	return r;
 }
 
-static void mq_tick(struct dm_cache_policy *p)
+static void mq_tick(struct dm_cache_policy *p, bool can_block)
 {
 	struct mq_policy *mq = to_mq_policy(p);
 	unsigned long flags;
@@ -1291,6 +1291,12 @@ static void mq_tick(struct dm_cache_policy *p)
 	spin_lock_irqsave(&mq->tick_lock, flags);
 	mq->tick_protected++;
 	spin_unlock_irqrestore(&mq->tick_lock, flags);
+
+	if (can_block) {
+		mutex_lock(&mq->lock);
+		copy_tick(mq);
+		mutex_unlock(&mq->lock);
+	}
 }
 
 static int mq_set_config_value(struct dm_cache_policy *p,
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 55a657f78f00..66feb307e697 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1581,7 +1581,7 @@ static dm_cblock_t smq_residency(struct dm_cache_policy *p)
 	return r;
 }
 
-static void smq_tick(struct dm_cache_policy *p)
+static void smq_tick(struct dm_cache_policy *p, bool can_block)
 {
 	struct smq_policy *mq = to_smq_policy(p);
 	unsigned long flags;
@@ -1589,6 +1589,12 @@ static void smq_tick(struct dm_cache_policy *p)
 	spin_lock_irqsave(&mq->tick_lock, flags);
 	mq->tick_protected++;
 	spin_unlock_irqrestore(&mq->tick_lock, flags);
+
+	if (can_block) {
+		mutex_lock(&mq->lock);
+		copy_tick(mq);
+		mutex_unlock(&mq->lock);
+	}
 }
 
 /* Init the policy plugin interface function pointers. */
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index 74709129d856..05db56eedb6a 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -200,10 +200,10 @@ struct dm_cache_policy {
 	 * Because of where we sit in the block layer, we can be asked to
 	 * map a lot of little bios that are all in the same block (no
	 * queue merging has occurred). To stop the policy being fooled by
-	 * these the core target sends regular tick() calls to the policy.
+	 * these, the core target sends regular tick() calls to the policy.
	 * The policy should only count an entry as hit once per tick.
	 */
-	void (*tick)(struct dm_cache_policy *p);
+	void (*tick)(struct dm_cache_policy *p, bool can_block);
 
	/*
	 * Configuration.
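[Note on the policy-side pattern above: tick() may be called from interrupt
context, so the unconditional part only bumps tick_protected under the
irq-safe tick_lock. The new can_block path additionally takes the policy
mutex so that pending ticks are folded into the policy's working state
immediately, instead of waiting for the next policy operation to do it.
copy_tick() itself is untouched by this patch; a minimal sketch of what it
is assumed to do, publishing tick_protected under the tick_lock (names per
the existing mq code, body reconstructed, not quoted from the tree):

static void copy_tick(struct mq_policy *mq)
{
	unsigned long flags;

	/* Fold the interrupt-safe counter into the tick value the policy reads. */
	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick = mq->tick_protected;
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}
]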
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 5aad875b822c..1b4e1756b169 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2271,7 +2271,7 @@ static void do_worker(struct work_struct *ws)
 static void do_waker(struct work_struct *ws)
 {
 	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
-	policy_tick(cache->policy);
+	policy_tick(cache->policy, true);
 	wake_worker(cache);
 	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
 }
@@ -3148,7 +3148,7 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	if (pb->tick) {
-		policy_tick(cache->policy);
+		policy_tick(cache->policy, false);
 		spin_lock_irqsave(&cache->lock, flags);
 		cache->need_tick_bio = true;
 		spin_unlock_irqrestore(&cache->lock, flags);
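[Caller-side contract, for reference: do_waker() runs from a workqueue,
i.e. process context, so it may sleep and passes can_block = true;
cache_end_io() can be reached from bio completion in interrupt context,
where mutex_lock() must not be used, so it passes false and leaves the
fold-in to the next blocking caller. A minimal sketch of how another
policy would adopt the new callback signature (example_policy,
example_tick and to_example_policy are hypothetical names; the locking
mirrors the mq/smq hunks above):

static void example_tick(struct dm_cache_policy *p, bool can_block)
{
	struct example_policy *ep = to_example_policy(p);
	unsigned long flags;

	/* Always safe, even when called from interrupt context. */
	spin_lock_irqsave(&ep->tick_lock, flags);
	ep->tick_protected++;
	spin_unlock_irqrestore(&ep->tick_lock, flags);

	/* Only sleep on the policy mutex when the caller permits it. */
	if (can_block) {
		mutex_lock(&ep->lock);
		copy_tick(ep);	/* hypothetical helper, as in mq/smq */
		mutex_unlock(&ep->lock);
	}
}
]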