block: null_blk: fix 'Invalid parameters' when loading module

On ARM64, the default page size is 64K on some distributions, and we
should allow ARM64 users to play with null_blk as well.

This patch fixes the load failure by extending the page bitmap so that
non-4KB PAGE_SIZE is supported too.

Cc: Bart Van Assche <Bart.VanAssche@wdc.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Kyungchan Koh <kkc6196@fb.com>
Cc: weiping zhang <zhangweiping@didichuxing.com>
Cc: Yi Zhang <yi.zhang@redhat.com>
Reported-by: Yi Zhang <yi.zhang@redhat.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 66231ad3e2 (parent b5d013bc09)
Author: Ming Lei
Date:   2018-03-06 12:07:13 +08:00
Committed by: Jens Axboe
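
Background on the failure: the old per-page bitmap was a single unsigned long
whose top two bits are reserved for the LOCK and FREE flags, and null_init()
refused to load (the check removed in the last hunk below) whenever the
remaining bits could not hold one bit per sector of a page. With 512-byte
sectors, a 64K page needs 128 data bits, which does not fit into 64 bits,
hence the -EINVAL ("Invalid parameters"). A minimal userspace sketch of that
arithmetic; the page sizes and the SECTOR_SHIFT value are assumptions for
illustration, not read from a running kernel:

#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors */

/* Sizing introduced by the patch: one bit per sector plus two flag bits. */
#define MAP_SZ(page_size) (((page_size) >> SECTOR_SHIFT) + 2)

int main(void)
{
	unsigned long page_sizes[] = { 4096, 65536 };	/* common 4K page vs. arm64 64K page */
	unsigned long old_bits = sizeof(unsigned long) * 8;	/* width of the old bitmap */

	for (int i = 0; i < 2; i++) {
		unsigned long ps = page_sizes[i];
		unsigned long sectors = ps >> SECTOR_SHIFT;
		/* Old null_init() check: usable bits (width minus two flags) must cover every sector. */
		const char *verdict = (old_bits - 2 >= sectors) ? "fits" : "overflows -> -EINVAL";

		printf("PAGE_SIZE=%lu: %lu sectors, old %lu-bit bitmap %s, new MAP_SZ=%lu bits\n",
		       ps, sectors, old_bits, verdict, MAP_SZ(ps));
	}
	return 0;
}

With a 4K page this prints MAP_SZ=10, with a 64K page MAP_SZ=130, which is why
DECLARE_BITMAP(bitmap, MAP_SZ) is needed instead of a bare unsigned long.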

@@ -74,6 +74,7 @@ enum nullb_device_flags {
 	NULLB_DEV_FL_CACHE = 3,
 };
+#define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2)
 /*
  * nullb_page is a page in memory for nullb devices.
  *
@@ -88,10 +89,10 @@ enum nullb_device_flags {
  */
 struct nullb_page {
 	struct page *page;
-	unsigned long bitmap;
+	DECLARE_BITMAP(bitmap, MAP_SZ);
 };
-#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1)
-#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2)
+#define NULLB_PAGE_LOCK (MAP_SZ - 1)
+#define NULLB_PAGE_FREE (MAP_SZ - 2)
 struct nullb_device {
 	struct nullb *nullb;
@@ -733,7 +734,7 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
 	if (!t_page->page)
 		goto out_freepage;
-	t_page->bitmap = 0;
+	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
 	return t_page;
 out_freepage:
 	kfree(t_page);
@@ -743,13 +744,20 @@ out:
 static void null_free_page(struct nullb_page *t_page)
 {
-	__set_bit(NULLB_PAGE_FREE, &t_page->bitmap);
-	if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap))
+	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
+	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
 		return;
 	__free_page(t_page->page);
 	kfree(t_page);
 }
 
+static bool null_page_empty(struct nullb_page *page)
+{
+	int size = MAP_SZ - 2;
+
+	return find_first_bit(page->bitmap, size) == size;
+}
+
 static void null_free_sector(struct nullb *nullb, sector_t sector,
 	bool is_cache)
 {
@@ -764,9 +772,9 @@ static void null_free_sector(struct nullb *nullb, sector_t sector,
 	t_page = radix_tree_lookup(root, idx);
 	if (t_page) {
-		__clear_bit(sector_bit, &t_page->bitmap);
-		if (!t_page->bitmap) {
+		__clear_bit(sector_bit, t_page->bitmap);
+		if (null_page_empty(t_page)) {
 			ret = radix_tree_delete_item(root, idx, t_page);
 			WARN_ON(ret != t_page);
 			null_free_page(ret);
@@ -837,7 +845,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
 	t_page = radix_tree_lookup(root, idx);
 	WARN_ON(t_page && t_page->page->index != idx);
-	if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap)))
+	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
 		return t_page;
 	return NULL;
@@ -900,10 +908,10 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
 	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
-	__clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap);
-	if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) {
+	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
+	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
 		null_free_page(c_page);
-		if (t_page && t_page->bitmap == 0) {
+		if (t_page && null_page_empty(t_page)) {
 			ret = radix_tree_delete_item(&nullb->dev->data,
 				idx, t_page);
 			null_free_page(t_page);
@@ -919,11 +927,11 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
 	for (i = 0; i < PAGE_SECTORS;
 			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
-		if (test_bit(i, &c_page->bitmap)) {
+		if (test_bit(i, c_page->bitmap)) {
 			offset = (i << SECTOR_SHIFT);
 			memcpy(dst + offset, src + offset,
 				nullb->dev->blocksize);
-			__set_bit(i, &t_page->bitmap);
+			__set_bit(i, t_page->bitmap);
 		}
 	}
@@ -960,10 +968,10 @@ again:
 		 * We found the page which is being flushed to disk by other
 		 * threads
 		 */
-		if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap))
+		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
 			c_pages[i] = NULL;
 		else
-			__set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap);
+			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
 	}
 	one_round = 0;
@@ -1016,7 +1024,7 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source,
 		kunmap_atomic(dst);
 		kunmap_atomic(src);
-		__set_bit(sector & SECTOR_MASK, &t_page->bitmap);
+		__set_bit(sector & SECTOR_MASK, t_page->bitmap);
 		if (is_fua)
 			null_free_sector(nullb, sector, true);
@@ -1846,10 +1854,6 @@ static int __init null_init(void)
 	struct nullb *nullb;
 	struct nullb_device *dev;
-	/* check for nullb_page.bitmap */
-	if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT))
-		return -EINVAL;
 	if (g_bs > PAGE_SIZE) {
 		pr_warn("null_blk: invalid block size\n");
 		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);