linux/drivers/lightnvm/pblk-read.c
Commit d340121eb7 by Javier González: lightnvm: pblk: simplify data validity check on GC
When a line is selected for recycling by the garbage collector (GC), the
line state changes and the invalid bitmap is frozen, preventing
invalidations from happening. Throughout the GC, the L2P map is checked
to verify that the data being recycled has not been updated. The last
check is done before the new mapping is stored in the L2P table. Though
this algorithm works, it requires a number of corner cases to be checked
each time the L2P table is updated. This complicates readability
and is error prone if the recycling algorithm is modified.

Instead, this patch makes the invalid bitmap accessible even when the
line is being recycled. When recycled data is being remapped, it is
enough to check the invalid bitmap for the line before updating the L2P
table.

Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2017-10-13 08:34:57 -06:00


/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
* Matias Bjorling <matias@cnexlabs.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* pblk-read.c - pblk's read path
*/
#include "pblk.h"
/*
* There is no guarantee that the value read from the cache has not been
* updated and now resides at another location in the cache. We do guarantee,
* though, that if the value is read from the cache, it belongs to the mapped
* lba. In order to guarantee ordering between writes and reads, a flush must
* be issued.
*/
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
sector_t lba, struct ppa_addr ppa,
int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
/* Callers must ensure that the ppa points to a cache address */
BUG_ON(pblk_ppa_empty(ppa));
BUG_ON(!pblk_addr_in_cache(ppa));
#endif
return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
bio_iter, advanced_bio);
}
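/*
* Resolve the L2P mapping for a multi-sector read. Sectors that are unmapped
* or can be served from the write buffer are marked in the read bitmap and the
* bio is advanced past them; the remaining sectors are collected in the ppa
* list to be read from the device.
*/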
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
sector_t blba, unsigned long *read_bitmap)
{
struct bio *bio = rqd->bio;
struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
int nr_secs = rqd->nr_ppas;
bool advanced_bio = false;
int i, j = 0;
pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);
for (i = 0; i < nr_secs; i++) {
struct ppa_addr p = ppas[i];
sector_t lba = blba + i;
retry:
if (pblk_ppa_empty(p)) {
WARN_ON(test_and_set_bit(i, read_bitmap));
if (unlikely(!advanced_bio)) {
bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
advanced_bio = true;
}
goto next;
}
/* Try to read from the write buffer. The address is re-checked inside
* the write buffer to avoid returning overwritten data.
*/
if (pblk_addr_in_cache(p)) {
if (!pblk_read_from_cache(pblk, bio, lba, p, i,
advanced_bio)) {
pblk_lookup_l2p_seq(pblk, &p, lba, 1);
goto retry;
}
WARN_ON(test_and_set_bit(i, read_bitmap));
advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->cache_reads);
#endif
} else {
/* Read non-cached sectors from media */
rqd->ppa_list[j++] = p;
}
next:
if (advanced_bio)
bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
}
if (pblk_io_aligned(pblk, nr_secs))
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
else
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
#ifdef CONFIG_NVM_DEBUG
atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}
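/* Submit the read to the device, translating errors into NVM_IO_* codes */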
static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
{
int err;
err = pblk_submit_io(pblk, rqd);
if (err)
return NVM_IO_ERR;
return NVM_IO_OK;
}
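/*
* Read completion path: log errors, free the DMA metadata buffer, complete the
* original bio if the read was issued on an internal clone, and release the
* request.
*/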
static void pblk_end_io_read(struct nvm_rq *rqd)
{
struct pblk *pblk = rqd->private;
struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
struct bio *bio = rqd->bio;
if (rqd->error)
pblk_log_read_err(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
else
WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
#endif
nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
bio_put(bio);
if (r_ctx->private) {
struct bio *orig_bio = r_ctx->private;
#ifdef CONFIG_NVM_DEBUG
WARN_ONCE(orig_bio->bi_status, "pblk: corrupted read bio\n");
#endif
bio_endio(orig_bio);
bio_put(orig_bio);
}
#ifdef CONFIG_NVM_DEBUG
atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif
pblk_free_rqd(pblk, rqd, READ);
atomic_dec(&pblk->inflight_io);
}
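/*
* Part of the request was served from the write buffer. Build an internal bio
* for the remaining sectors (the holes in the read bitmap), read them
* synchronously from the device and copy the data back into the original bio
* before completing it.
*/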
static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
unsigned int bio_init_idx,
unsigned long *read_bitmap)
{
struct bio *new_bio, *bio = rqd->bio;
struct bio_vec src_bv, dst_bv;
void *ppa_ptr = NULL;
void *src_p, *dst_p;
dma_addr_t dma_ppa_list = 0;
int nr_secs = rqd->nr_ppas;
int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
int i, ret, hole;
DECLARE_COMPLETION_ONSTACK(wait);
new_bio = bio_alloc(GFP_KERNEL, nr_holes);
if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
goto err;
if (nr_holes != new_bio->bi_vcnt) {
pr_err("pblk: malformed bio\n");
goto err;
}
new_bio->bi_iter.bi_sector = 0; /* internal bio */
bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
new_bio->bi_private = &wait;
new_bio->bi_end_io = pblk_end_bio_sync;
rqd->bio = new_bio;
rqd->nr_ppas = nr_holes;
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
rqd->end_io = NULL;
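/* With a single hole left, the request takes a single ppa_addr rather than a
* ppa list; save the list pointers so they can be restored after submission.
*/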
if (unlikely(nr_secs > 1 && nr_holes == 1)) {
ppa_ptr = rqd->ppa_list;
dma_ppa_list = rqd->dma_ppa_list;
rqd->ppa_addr = rqd->ppa_list[0];
}
ret = pblk_submit_read_io(pblk, rqd);
if (ret) {
bio_put(rqd->bio);
pr_err("pblk: read IO submission failed\n");
goto err;
}
if (!wait_for_completion_io_timeout(&wait,
msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
pr_err("pblk: partial read I/O timed out\n");
}
if (rqd->error) {
atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}
if (unlikely(nr_secs > 1 && nr_holes == 1)) {
rqd->ppa_list = ppa_ptr;
rqd->dma_ppa_list = dma_ppa_list;
}
/* Fill the holes in the original bio */
i = 0;
hole = find_first_zero_bit(read_bitmap, nr_secs);
do {
src_bv = new_bio->bi_io_vec[i++];
dst_bv = bio->bi_io_vec[bio_init_idx + hole];
src_p = kmap_atomic(src_bv.bv_page);
dst_p = kmap_atomic(dst_bv.bv_page);
memcpy(dst_p + dst_bv.bv_offset,
src_p + src_bv.bv_offset,
PBLK_EXPOSED_PAGE_SIZE);
kunmap_atomic(src_p);
kunmap_atomic(dst_p);
mempool_free(src_bv.bv_page, pblk->page_bio_pool);
hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
} while (hole < nr_secs);
bio_put(new_bio);
/* Complete the original bio and associated request */
rqd->bio = bio;
rqd->nr_ppas = nr_secs;
rqd->private = pblk;
bio_endio(bio);
pblk_end_io_read(rqd);
return NVM_IO_OK;
err:
/* Free allocated pages in new bio */
pblk_bio_free_pages(pblk, bio, 0, new_bio->bi_vcnt);
rqd->private = pblk;
pblk_end_io_read(rqd);
return NVM_IO_ERR;
}
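/* Single-sector read: serve the lba either from the write buffer or set up a
* device read.
*/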
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
sector_t lba, unsigned long *read_bitmap)
{
struct bio *bio = rqd->bio;
struct ppa_addr ppa;
pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
#ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->inflight_reads);
#endif
retry:
if (pblk_ppa_empty(ppa)) {
WARN_ON(test_and_set_bit(0, read_bitmap));
return;
}
/* Try to read from the write buffer. The address is re-checked inside the
* write buffer to avoid returning overwritten data.
*/
if (pblk_addr_in_cache(ppa)) {
if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
goto retry;
}
WARN_ON(test_and_set_bit(0, read_bitmap));
#ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->cache_reads);
#endif
} else {
rqd->ppa_addr = ppa;
}
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}
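/*
* Entry point for the read path: populate the request, complete it immediately
* if every sector was served from the write buffer, submit it to the device if
* none were, or fall back to a partial read otherwise.
*/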
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
struct nvm_tgt_dev *dev = pblk->dev;
sector_t blba = pblk_get_lba(bio);
unsigned int nr_secs = pblk_get_secs(bio);
struct nvm_rq *rqd;
unsigned long read_bitmap; /* Max 64 ppas per request */
unsigned int bio_init_idx;
int ret = NVM_IO_ERR;
/* logic error: lba out-of-bounds. Ignore read request */
if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
(unsigned long long)blba, nr_secs);
return NVM_IO_ERR;
}
bitmap_zero(&read_bitmap, nr_secs);
rqd = pblk_alloc_rqd(pblk, READ);
rqd->opcode = NVM_OP_PREAD;
rqd->bio = bio;
rqd->nr_ppas = nr_secs;
rqd->private = pblk;
rqd->end_io = pblk_end_io_read;
/* Save the index for this bio's start. This is needed in case
* we need to fill a partial read.
*/
bio_init_idx = pblk_get_bi_idx(bio);
rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
&rqd->dma_meta_list);
if (!rqd->meta_list) {
pr_err("pblk: not able to allocate ppa list\n");
goto fail_rqd_free;
}
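/* The ppa list is carved out of the same DMA buffer as the metadata; a
* single-sector request uses ppa_addr directly.
*/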
if (nr_secs > 1) {
rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
pblk_read_ppalist_rq(pblk, rqd, blba, &read_bitmap);
} else {
pblk_read_rq(pblk, rqd, blba, &read_bitmap);
}
bio_get(bio);
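/* All sectors were served from the write buffer (or are unmapped); complete
* the bio without touching the device.
*/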
if (bitmap_full(&read_bitmap, nr_secs)) {
bio_endio(bio);
atomic_inc(&pblk->inflight_io);
pblk_end_io_read(rqd);
return NVM_IO_OK;
}
/* All sectors are to be read from the device */
if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
struct bio *int_bio = NULL;
struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
/* Clone read bio to deal with read errors internally */
int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
if (!int_bio) {
pr_err("pblk: could not clone read bio\n");
return NVM_IO_ERR;
}
rqd->bio = int_bio;
r_ctx->private = bio;
ret = pblk_submit_read_io(pblk, rqd);
if (ret) {
pr_err("pblk: read IO submission failed\n");
if (int_bio)
bio_put(int_bio);
return ret;
}
return NVM_IO_OK;
}
/* The read bio request could be partially filled by the write buffer,
* but there are some holes that need to be read from the drive.
*/
ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
if (ret) {
pr_err("pblk: failed to perform partial read\n");
return ret;
}
return NVM_IO_OK;
fail_rqd_free:
pblk_free_rqd(pblk, rqd, READ);
return ret;
}
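/*
* GC multi-sector read: look up the current L2P entries for the victim lbas
* and keep only those that still point into the line being recycled; sectors
* overwritten in the meantime are dropped from the GC request.
*/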
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_line *line, u64 *lba_list,
u64 *paddr_list_gc, unsigned int nr_secs)
{
struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
struct ppa_addr ppa_gc;
int valid_secs = 0;
int i;
pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);
for (i = 0; i < nr_secs; i++) {
if (lba_list[i] == ADDR_EMPTY)
continue;
ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
continue;
}
rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
}
#ifdef CONFIG_NVM_DEBUG
atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif
return valid_secs;
}
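/* GC single-sector variant of the L2P validity check above */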
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_line *line, sector_t lba,
u64 paddr_gc)
{
struct ppa_addr ppa_l2p, ppa_gc;
int valid_secs = 0;
if (lba == ADDR_EMPTY)
goto out;
/* logic error: lba out-of-bounds */
if (lba >= pblk->rl.nr_secs) {
WARN(1, "pblk: read lba out of bounds\n");
goto out;
}
spin_lock(&pblk->trans_lock);
ppa_l2p = pblk_trans_map_get(pblk, lba);
spin_unlock(&pblk->trans_lock);
ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
goto out;
rqd->ppa_addr = ppa_l2p;
valid_secs = 1;
#ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->inflight_reads);
#endif
out:
return valid_secs;
}
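/*
* Read the sectors selected for GC into the buffer provided by the GC context.
* The request is issued synchronously and waited on with a timeout.
*/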
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
struct bio *bio;
struct nvm_rq rqd;
int data_len;
int ret = NVM_IO_OK;
DECLARE_COMPLETION_ONSTACK(wait);
memset(&rqd, 0, sizeof(struct nvm_rq));
rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
&rqd.dma_meta_list);
if (!rqd.meta_list)
return -ENOMEM;
if (gc_rq->nr_secs > 1) {
rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
gc_rq->lba_list,
gc_rq->paddr_list,
gc_rq->nr_secs);
if (gc_rq->secs_to_gc == 1)
rqd.ppa_addr = rqd.ppa_list[0];
} else {
gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
gc_rq->lba_list[0],
gc_rq->paddr_list[0]);
}
if (!(gc_rq->secs_to_gc))
goto out;
data_len = (gc_rq->secs_to_gc) * geo->sec_size;
bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
PBLK_VMALLOC_META, GFP_KERNEL);
if (IS_ERR(bio)) {
pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
goto err_free_dma;
}
bio->bi_iter.bi_sector = 0; /* internal bio */
bio_set_op_attrs(bio, REQ_OP_READ, 0);
rqd.opcode = NVM_OP_PREAD;
rqd.end_io = pblk_end_io_sync;
rqd.private = &wait;
rqd.nr_ppas = gc_rq->secs_to_gc;
rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
rqd.bio = bio;
if (pblk_submit_read_io(pblk, &rqd)) {
ret = -EIO;
pr_err("pblk: GC read request failed\n");
goto err_free_bio;
}
if (!wait_for_completion_io_timeout(&wait,
msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
pr_err("pblk: GC read I/O timed out\n");
}
atomic_dec(&pblk->inflight_io);
if (rqd.error) {
atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
}
#ifdef CONFIG_NVM_DEBUG
atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif
bio_put(bio);
out:
nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
return ret;
err_free_bio:
bio_put(bio);
err_free_dma:
nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
return ret;
}