dm cache: be much more aggressive about promoting writes to discarded blocks

Previously these promotions only got priority if there were unused cache
blocks.  Now we give them priority if there are any clean blocks in the
cache.

The fio_soak_test in the device-mapper-test-suite now gives uniform
performance across subvolumes (~16 seconds).

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
This commit is contained in:
Joe Thornber 2013-10-24 14:10:28 -04:00 committed by Mike Snitzer
parent 01911c19be
commit c86c30706c

View File

@ -150,6 +150,21 @@ static void queue_init(struct queue *q)
INIT_LIST_HEAD(q->qs + i); INIT_LIST_HEAD(q->qs + i);
} }
/*
* Checks to see if the queue is empty.
* FIXME: reduce cpu usage.
*/
/*
 * Checks to see if the queue is empty.
 * FIXME: reduce cpu usage.
 */
static bool queue_empty(struct queue *q)
{
	unsigned level = 0;

	/* Scan every priority level; bail out as soon as one holds an entry. */
	while (level < NR_QUEUE_LEVELS) {
		if (!list_empty(&q->qs[level]))
			return false;
		level++;
	}

	return true;
}
/* /*
* Insert an entry to the back of the given level. * Insert an entry to the back of the given level.
*/ */
@ -442,6 +457,11 @@ static bool any_free_cblocks(struct mq_policy *mq)
return mq->nr_cblocks_allocated < from_cblock(mq->cache_size); return mq->nr_cblocks_allocated < from_cblock(mq->cache_size);
} }
/*
 * Returns true if the cache holds at least one clean (non-dirty) block,
 * i.e. a block that could be evicted without a writeback copy.
 */
static bool any_clean_cblocks(struct mq_policy *mq)
{
	struct queue *clean = &mq->cache_clean;

	return !queue_empty(clean);
}
/* /*
* Fills result out with a cache block that isn't in use, or return * Fills result out with a cache block that isn't in use, or return
* -ENOSPC. This does _not_ mark the cblock as allocated, the caller is * -ENOSPC. This does _not_ mark the cblock as allocated, the caller is
@ -688,17 +708,18 @@ static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock, dm_cblock_t
static unsigned adjusted_promote_threshold(struct mq_policy *mq, static unsigned adjusted_promote_threshold(struct mq_policy *mq,
bool discarded_oblock, int data_dir) bool discarded_oblock, int data_dir)
{ {
if (discarded_oblock && any_free_cblocks(mq) && data_dir == WRITE) if (data_dir == READ)
return mq->promote_threshold + READ_PROMOTE_THRESHOLD;
if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
/* /*
* We don't need to do any copying at all, so give this a * We don't need to do any copying at all, so give this a
* very low threshold. In practice this only triggers * very low threshold.
* during initial population after a format.
*/ */
return DISCARDED_PROMOTE_THRESHOLD; return DISCARDED_PROMOTE_THRESHOLD;
}
return data_dir == READ ? return mq->promote_threshold + WRITE_PROMOTE_THRESHOLD;
(mq->promote_threshold + READ_PROMOTE_THRESHOLD) :
(mq->promote_threshold + WRITE_PROMOTE_THRESHOLD);
} }
static bool should_promote(struct mq_policy *mq, struct entry *e, static bool should_promote(struct mq_policy *mq, struct entry *e,
@ -772,6 +793,17 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
return r; return r;
} }
/*
 * Initialise @e to track origin block @oblock and add it to the
 * pre-cache queues.  The entry starts with a single hit and is neither
 * in the cache proper nor dirty.
 */
static void insert_entry_in_pre_cache(struct mq_policy *mq,
				      struct entry *e, dm_oblock_t oblock)
{
	e->oblock = oblock;
	e->in_cache = false;
	e->dirty = false;
	e->hit_count = 1;
	/* Stamp with the current generation so ageing works correctly. */
	e->generation = mq->generation;

	push(mq, e);
}
static void insert_in_pre_cache(struct mq_policy *mq, static void insert_in_pre_cache(struct mq_policy *mq,
dm_oblock_t oblock) dm_oblock_t oblock)
{ {
@ -789,32 +821,43 @@ static void insert_in_pre_cache(struct mq_policy *mq,
return; return;
} }
e->in_cache = false; insert_entry_in_pre_cache(mq, e, oblock);
e->dirty = false;
e->oblock = oblock;
e->hit_count = 1;
e->generation = mq->generation;
push(mq, e);
} }
static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock, static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
struct policy_result *result) struct policy_result *result)
{ {
int r;
struct entry *e; struct entry *e;
dm_cblock_t cblock; dm_cblock_t cblock;
if (find_free_cblock(mq, &cblock) == -ENOSPC) { if (find_free_cblock(mq, &cblock) == -ENOSPC) {
r = demote_cblock(mq, &result->old_oblock, &cblock);
if (unlikely(r)) {
result->op = POLICY_MISS; result->op = POLICY_MISS;
insert_in_pre_cache(mq, oblock); insert_in_pre_cache(mq, oblock);
return; return;
} }
/*
* This will always succeed, since we've just demoted.
*/
e = pop(mq, &mq->pre_cache);
result->op = POLICY_REPLACE;
} else {
e = alloc_entry(mq); e = alloc_entry(mq);
if (unlikely(!e))
e = pop(mq, &mq->pre_cache);
if (unlikely(!e)) { if (unlikely(!e)) {
result->op = POLICY_MISS; result->op = POLICY_MISS;
return; return;
} }
result->op = POLICY_NEW;
}
e->oblock = oblock; e->oblock = oblock;
e->cblock = cblock; e->cblock = cblock;
e->in_cache = true; e->in_cache = true;
@ -823,7 +866,6 @@ static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
e->generation = mq->generation; e->generation = mq->generation;
push(mq, e); push(mq, e);
result->op = POLICY_NEW;
result->cblock = e->cblock; result->cblock = e->cblock;
} }