dm dust: use dust block size for badblocklist index

Change the "frontend" dust_remove_block, dust_add_block, and
dust_query_block functions to store the "dust block number", instead
of the sector number corresponding to the "dust block number".

For the "backend" functions dust_map_read and dust_map_write,
right-shift by sect_per_block_shift.  This fixes the inability to
emulate failure beyond the first sector of each "dust block" (for
devices with a "dust block size" larger than 512 bytes).
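To make the arithmetic concrete, here is a minimal userspace sketch (not part of the patch) of the sector-to-"dust block" conversion the backend functions now perform.  The 4096-byte block size (eight 512-byte sectors per block, a shift of 3) is an assumed example configuration:

/*
 * Sketch only: map an I/O sector to its "dust block number" with a
 * right shift.  Userspace __builtin_ffs() is 1-based, so subtract 1
 * to match the kernel's 0-based __ffs().
 */
#include <stdio.h>

int main(void)
{
	unsigned int sect_per_block = 4096 / 512;                      /* 8 sectors per dust block */
	int sect_per_block_shift = __builtin_ffs(sect_per_block) - 1;  /* 3 */
	unsigned long long sector = 19;                                /* lands mid-block */

	/*
	 * Before the fix, only sector 16 (block 2 * sect_per_block) matched
	 * a badblocklist entry for block 2; sectors 17-23 of the same dust
	 * block did not.  After the fix, every sector of that block maps to
	 * block number 2 and is failed as expected.
	 */
	printf("sector %llu -> dust block %llu\n",
	       sector, sector >> sect_per_block_shift);
	return 0;
}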

Fixes: e4f3fabd67 ("dm: add dust target")
Cc: stable@vger.kernel.org
Signed-off-by: Bryan Gurney <bgurney@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -25,6 +25,7 @@ struct dust_device {
 	unsigned long long badblock_count;
 	spinlock_t dust_lock;
 	unsigned int blksz;
+	int sect_per_block_shift;
 	unsigned int sect_per_block;
 	sector_t start;
 	bool fail_read_on_bb:1;
@@ -79,7 +80,7 @@ static int dust_remove_block(struct dust_device *dd, unsigned long long block)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dd->dust_lock, flags);
-	bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
+	bblock = dust_rb_search(&dd->badblocklist, block);
 
 	if (bblock == NULL) {
 		if (!dd->quiet_mode) {
@@ -113,7 +114,7 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block)
 	}
 
 	spin_lock_irqsave(&dd->dust_lock, flags);
-	bblock->bb = block * dd->sect_per_block;
+	bblock->bb = block;
 	if (!dust_rb_insert(&dd->badblocklist, bblock)) {
 		if (!dd->quiet_mode) {
 			DMERR("%s: block %llu already in badblocklist",
@@ -138,7 +139,7 @@ static int dust_query_block(struct dust_device *dd, unsigned long long block)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dd->dust_lock, flags);
-	bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
+	bblock = dust_rb_search(&dd->badblocklist, block);
 	if (bblock != NULL)
 		DMINFO("%s: block %llu found in badblocklist", __func__, block);
 	else
@@ -165,6 +166,7 @@ static int dust_map_read(struct dust_device *dd, sector_t thisblock,
 	int ret = DM_MAPIO_REMAPPED;
 
 	if (fail_read_on_bb) {
+		thisblock >>= dd->sect_per_block_shift;
 		spin_lock_irqsave(&dd->dust_lock, flags);
 		ret = __dust_map_read(dd, thisblock);
 		spin_unlock_irqrestore(&dd->dust_lock, flags);
@@ -195,6 +197,7 @@ static int dust_map_write(struct dust_device *dd, sector_t thisblock,
 	unsigned long flags;
 
 	if (fail_read_on_bb) {
+		thisblock >>= dd->sect_per_block_shift;
 		spin_lock_irqsave(&dd->dust_lock, flags);
 		__dust_map_write(dd, thisblock);
 		spin_unlock_irqrestore(&dd->dust_lock, flags);
@@ -331,6 +334,8 @@ static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	dd->blksz = blksz;
 	dd->start = tmp;
 
+	dd->sect_per_block_shift = __ffs(sect_per_block);
+
 	/*
 	 * Whether to fail a read on a "bad" block.
 	 * Defaults to false; enabled later by message.
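A note on the constructor change: dm-dust only accepts power-of-two block sizes, so sect_per_block is itself a power of two and __ffs() (the 0-based index of the lowest set bit) equals its log2.  The shift in the I/O path is therefore equivalent to dividing the sector by sect_per_block.  The small userspace sketch below illustrates that equivalence; __builtin_ctzl() stands in for the kernel's __ffs() and the values are assumed examples:

/*
 * Sketch (not from the patch): for a power-of-two sect_per_block, the
 * 0-based find-first-set index is log2 of the value, so shifting by it
 * matches division by sect_per_block.
 */
#include <assert.h>

int main(void)
{
	unsigned int sect_per_block = 8;                        /* e.g. 4096-byte dust blocks */
	unsigned int shift = __builtin_ctzl(sect_per_block);    /* kernel __ffs() equivalent */
	unsigned long long sector;

	for (sector = 0; sector < 64; sector++)
		assert((sector >> shift) == (sector / sect_per_block));
	return 0;
}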