writeback: make bdi_start_background_writeback() take bdi_writeback instead of backing_dev_info
bdi_start_background_writeback() currently takes @bdi and kicks the root
wb (bdi_writeback).  In preparation for cgroup writeback support, make it
take wb instead.

This patch doesn't make any functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent bc05873dcc
commit 9ecf4866c0
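For context, a minimal caller-side sketch of the interface change. The example function below is hypothetical; only the writeback helpers and the embedded root wb used here appear in the diff that follows.

/*
 * Hypothetical caller showing the conversion this patch asks for.
 * Before: bdi_start_background_writeback(bdi) always woke the root
 * wb embedded in the bdi.  After: the caller names the bdi_writeback
 * explicitly, which is what per-cgroup wbs will need later.
 */
static void example_kick_background(struct backing_dev_info *bdi)
{
	struct bdi_writeback *wb = &bdi->wb;	/* root wb for now */

	if (!writeback_in_progress(wb))
		wb_start_background_writeback(wb);
}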
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -228,23 +228,23 @@ void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
 }
 
 /**
- * bdi_start_background_writeback - start background writeback
- * @bdi: the backing device to write from
+ * wb_start_background_writeback - start background writeback
+ * @wb: bdi_writback to write from
  *
  * Description:
  *   This makes sure WB_SYNC_NONE background writeback happens. When
- *   this function returns, it is only guaranteed that for given BDI
+ *   this function returns, it is only guaranteed that for given wb
  *   some IO is happening if we are over background dirty threshold.
  *   Caller need not hold sb s_umount semaphore.
  */
-void bdi_start_background_writeback(struct backing_dev_info *bdi)
+void wb_start_background_writeback(struct bdi_writeback *wb)
 {
 	/*
 	 * We just wake up the flusher thread. It will perform background
 	 * writeback as soon as there is no other work to do.
 	 */
-	trace_writeback_wake_background(bdi);
-	wb_wakeup(&bdi->wb);
+	trace_writeback_wake_background(wb->bdi);
+	wb_wakeup(wb);
 }
 
 /*
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -27,7 +27,7 @@ void bdi_unregister(struct backing_dev_info *bdi);
 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
 void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
 			bool range_cyclic, enum wb_reason reason);
-void bdi_start_background_writeback(struct backing_dev_info *bdi);
+void wb_start_background_writeback(struct bdi_writeback *wb);
 void wb_workfn(struct work_struct *work);
 void wb_wakeup_delayed(struct bdi_writeback *wb);
 
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1456,7 +1456,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 		}
 
 		if (unlikely(!writeback_in_progress(wb)))
-			bdi_start_background_writeback(bdi);
+			wb_start_background_writeback(wb);
 
 		if (!strictlimit)
 			wb_dirty_limits(wb, dirty_thresh, background_thresh,
@@ -1588,7 +1588,7 @@ pause:
 		return;
 
 	if (nr_reclaimable > background_thresh)
-		bdi_start_background_writeback(bdi);
+		wb_start_background_writeback(wb);
 }
 
 static DEFINE_PER_CPU(int, bdp_ratelimits);