block: avoid building too big plug list
When I test an fio script with big I/O depth, I found the total throughput drops compared to some relatively small I/O depths. The reason is the thread accumulates big requests in its plug list and causes some delays (surely this depends on CPU speed). I thought we'd better have a threshold for requests. When the threshold is reached, it means there is no request merge and queue lock contention isn't severe when pushing per-task requests to the queue, so the main advantages of blk plug don't exist. We can force a plug list flush in this case. With this, my test throughput actually increases and almost equals that of small I/O depth. Another side effect is that irq-off time decreases in blk_flush_plug_list() for big I/O depth. The BLK_MAX_REQUEST_COUNT is chosen arbitrarily, but 16 is efficient at reducing lock contention for me. But I'm open here, 32 is ok in my test too. Signed-off-by: Shaohua Li <shaohua.li@intel.com> Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
This commit is contained in:
parent
719c0c5906
commit
55c022bbdd
|
@ -1302,7 +1302,10 @@ get_rq:
|
||||||
plug->should_sort = 1;
|
plug->should_sort = 1;
|
||||||
}
|
}
|
||||||
list_add_tail(&req->queuelist, &plug->list);
|
list_add_tail(&req->queuelist, &plug->list);
|
||||||
|
plug->count++;
|
||||||
drive_stat_acct(req, 1);
|
drive_stat_acct(req, 1);
|
||||||
|
if (plug->count >= BLK_MAX_REQUEST_COUNT)
|
||||||
|
blk_flush_plug_list(plug, false);
|
||||||
} else {
|
} else {
|
||||||
spin_lock_irq(q->queue_lock);
|
spin_lock_irq(q->queue_lock);
|
||||||
add_acct_request(q, req, where);
|
add_acct_request(q, req, where);
|
||||||
|
@ -2626,6 +2629,7 @@ void blk_start_plug(struct blk_plug *plug)
|
||||||
INIT_LIST_HEAD(&plug->list);
|
INIT_LIST_HEAD(&plug->list);
|
||||||
INIT_LIST_HEAD(&plug->cb_list);
|
INIT_LIST_HEAD(&plug->cb_list);
|
||||||
plug->should_sort = 0;
|
plug->should_sort = 0;
|
||||||
|
plug->count = 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If this is a nested plug, don't actually assign it. It will be
|
* If this is a nested plug, don't actually assign it. It will be
|
||||||
|
@ -2709,6 +2713,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
list_splice_init(&plug->list, &list);
|
list_splice_init(&plug->list, &list);
|
||||||
|
plug->count = 0;
|
||||||
|
|
||||||
if (plug->should_sort) {
|
if (plug->should_sort) {
|
||||||
list_sort(NULL, &list, plug_rq_cmp);
|
list_sort(NULL, &list, plug_rq_cmp);
|
||||||
|
|
|
@ -862,7 +862,10 @@ struct blk_plug {
|
||||||
struct list_head list;
|
struct list_head list;
|
||||||
struct list_head cb_list;
|
struct list_head cb_list;
|
||||||
unsigned int should_sort;
|
unsigned int should_sort;
|
||||||
|
unsigned int count;
|
||||||
};
|
};
|
||||||
|
#define BLK_MAX_REQUEST_COUNT 16
|
||||||
|
|
||||||
struct blk_plug_cb {
|
struct blk_plug_cb {
|
||||||
struct list_head list;
|
struct list_head list;
|
||||||
void (*callback)(struct blk_plug_cb *);
|
void (*callback)(struct blk_plug_cb *);
|
||||||
|
|
Loading…
Reference in New Issue