dm: don't start current request if it would've merged with the previous

Request-based DM's dm_request_fn() pulls requests off the queue so
quickly that steps must be taken to promote merging by deferring
request processing when it makes sense.

If the current request would've merged with the previous request, let
the current request stay on the queue longer.

Suggested-by: Jens Axboe <axboe@fb.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
This commit is contained in:
Mike Snitzer 2015-02-24 21:58:21 -05:00
parent d548b34b06
commit de3ec86dff

View File

@@ -21,6 +21,7 @@
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/elevator.h> /* for rq_end_sector() */
#include <trace/events/block.h>
@@ -216,6 +217,10 @@ struct mapped_device {
struct kthread_worker kworker;
struct task_struct *kworker_task;
/* for request-based merge heuristic in dm_request_fn() */
sector_t last_rq_pos;
int last_rq_rw;
};
/*
@@ -1930,6 +1935,9 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
blk_start_request(orig);
atomic_inc(&md->pending[rq_data_dir(orig)]);
md->last_rq_pos = rq_end_sector(orig);
md->last_rq_rw = rq_data_dir(orig);
/*
* Hold the md reference here for the in-flight I/O.
* We can't rely on the reference count by device opener,
@@ -1982,6 +1990,10 @@ static void dm_request_fn(struct request_queue *q)
continue;
}
if (md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
goto delay_and_out;
if (ti->type->busy && ti->type->busy(ti))
goto delay_and_out;