dm thin: performance improvement to discard processing
When processing a discard bio, if the block is already quiesced do the
discard immediately rather than adding the mapping to a list for the
next iteration of the worker thread.

Discarding a fully provisioned 100G thin volume with 64k block size
goes from 860s to 95s with this change.

Clearly there's something wrong with the worker architecture; more
investigation is needed.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
commit 7a7e97ca58
parent 36f12aeb71
@@ -1194,7 +1194,6 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
 static void process_discard(struct thin_c *tc, struct bio *bio)
 {
 	int r;
-	unsigned long flags;
 	struct pool *pool = tc->pool;
 	struct dm_bio_prison_cell *cell, *cell2;
 	struct dm_cell_key key, key2;
@@ -1235,12 +1234,9 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 			m->cell2 = cell2;
 			m->bio = bio;
 
-			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
-				spin_lock_irqsave(&pool->lock, flags);
-				list_add_tail(&m->list, &pool->prepared_discards);
-				spin_unlock_irqrestore(&pool->lock, flags);
-				wake_worker(pool);
-			}
+			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
+				pool->process_prepared_discard(m);
 		} else {
 			inc_all_io_entry(pool, bio);
 			cell_defer_no_holder(tc, cell);
 			cell_defer_no_holder(tc, cell2);
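To make the control flow concrete, here is a minimal user-space sketch of the dispatch this patch introduces, not the kernel code: dm_deferred_set_add_work() returns zero when the deferred set is already quiesced (no in-flight I/O is holding it), and the patch takes that as licence to process the prepared discard on the spot instead of queueing it for the worker. Everything below (struct mapping, deferred_set_add_work(), queue_for_worker(), the inflight_io parameter) is hypothetical scaffolding; only the if (!...) dispatch shape mirrors the patch.

/* Minimal sketch of the quiesced-block fast path (hypothetical scaffolding). */
#include <stdbool.h>
#include <stdio.h>

struct mapping {
	int block;	/* block number the discard applies to */
};

/* Stand-in for dm_deferred_set_add_work(): returns true if the work must
 * wait behind in-flight I/O, false if the set is already quiesced and the
 * caller may act immediately. */
static bool deferred_set_add_work(struct mapping *m, int inflight_io)
{
	(void)m;
	return inflight_io > 0;
}

/* Stand-in for pool->process_prepared_discard(m): the new immediate path. */
static void process_prepared_discard(struct mapping *m)
{
	printf("block %d: quiesced, discarding immediately\n", m->block);
}

/* Stand-in for the old path: queue the mapping and wake the worker thread. */
static void queue_for_worker(struct mapping *m)
{
	printf("block %d: busy, deferring to worker\n", m->block);
}

int main(void)
{
	struct mapping quiet = { .block = 1 };
	struct mapping busy = { .block = 2 };

	/* Quiesced block: the new fast path, no worker round trip. */
	if (!deferred_set_add_work(&quiet, 0))
		process_prepared_discard(&quiet);
	else
		queue_for_worker(&quiet);

	/* Block with in-flight I/O: still goes through the worker. */
	if (!deferred_set_add_work(&busy, 3))
		process_prepared_discard(&busy);
	else
		queue_for_worker(&busy);

	return 0;
}

At a 64k block size a fully provisioned 100G volume holds roughly 1.6 million such mappings, so dropping a lock round trip and a worker wakeup per mapping on the common quiesced path is consistent with the 860s-to-95s improvement quoted in the commit message.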