staging: zram: factor-out zram_decompress_page() function
zram_bvec_read() shared its decompress functionality with the
zram_read_before_write() function. Factor out a common
zram_decompress_page() function, which also simplifies error handling
in zram_bvec_read().

Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Reviewed-by: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 37b51fdddf
parent e446f5a854
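For readers outside the kernel tree, here is a minimal userspace sketch of the pattern this patch introduces: one page-decompression helper with a zero-fill fast path, shared by the read path and by the write path's read-modify-write step. Everything below is invented for illustration (stub_decompress(), bvec_read(), struct table_entry); the real code uses lzo1x_decompress_safe(), zs_map_object()/zs_unmap_object(), kmap_atomic() and the zram stats counters, all of which are omitted here.

/*
 * Illustrative userspace mock of the control flow, NOT zram code:
 * one decompress helper shared by full reads, partial reads and the
 * write path's read-before-write step. "Compression" is stubbed out.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct table_entry {
	unsigned char *handle;	/* NULL: page was never written */
	size_t size;		/* PAGE_SIZE: stored uncompressed */
	int zero;		/* stands in for the ZRAM_ZERO flag */
};

/* Stub decompressor; the real code calls lzo1x_decompress_safe().
 * Never reached in this demo, since nothing is stored compressed. */
static int stub_decompress(const unsigned char *src, size_t src_len,
			   unsigned char *dst, size_t *dst_len)
{
	memcpy(dst, src, src_len);	/* pretend this expands to a page */
	*dst_len = PAGE_SIZE;
	return 0;			/* LZO_E_OK analogue */
}

/* The factored-out helper: always fills mem[] with a full page. */
static int decompress_page(struct table_entry *t, unsigned char *mem,
			   unsigned index)
{
	int ret = 0;
	size_t clen = PAGE_SIZE;

	/* Zero-filled and never-written pages short-circuit to memset(). */
	if (!t[index].handle || t[index].zero) {
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}

	if (t[index].size == PAGE_SIZE)		/* stored uncompressed */
		memcpy(mem, t[index].handle, PAGE_SIZE);
	else
		ret = stub_decompress(t[index].handle, t[index].size,
				      mem, &clen);
	return ret;
}

/* Read path: partial reads go through a temporary full-page buffer,
 * mirroring the is_partial_io() handling in zram_bvec_read(). */
static int bvec_read(struct table_entry *t, unsigned index,
		     unsigned char *out, size_t offset, size_t len)
{
	int ret;
	unsigned char *uncmem = out;

	if (len != PAGE_SIZE)		/* partial I/O */
		uncmem = malloc(PAGE_SIZE);
	if (!uncmem)
		return -1;

	ret = decompress_page(t, uncmem, index);
	if (ret == 0 && len != PAGE_SIZE)
		memcpy(out, uncmem + offset, len);
	if (len != PAGE_SIZE)
		free(uncmem);
	return ret;
}

int main(void)
{
	struct table_entry table[1] = { { NULL, 0, 0 } };
	unsigned char buf[16];

	/* Read before write: the helper zero-fills instead of failing. */
	if (bvec_read(table, 0, buf, 128, sizeof(buf)) == 0)
		printf("first byte: %d (zero-filled page)\n", buf[0]);
	return 0;
}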
@@ -183,88 +183,22 @@ static inline int is_partial_io(struct bio_vec *bvec)
 	return bvec->bv_len != PAGE_SIZE;
 }
 
-static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
-			  u32 index, int offset, struct bio *bio)
+static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 {
-	int ret;
-	size_t clen;
-	struct page *page;
-	unsigned char *user_mem, *cmem, *uncmem = NULL;
-
-	page = bvec->bv_page;
-
-	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
-		handle_zero_page(bvec);
-		return 0;
-	}
-
-	/* Requested page is not present in compressed area */
-	if (unlikely(!zram->table[index].handle)) {
-		pr_debug("Read before write: sector=%lu, size=%u",
-			 (ulong)(bio->bi_sector), bio->bi_size);
-		handle_zero_page(bvec);
-		return 0;
-	}
-
-	if (is_partial_io(bvec)) {
-		/* Use a temporary buffer to decompress the page */
-		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
-		if (!uncmem) {
-			pr_info("Error allocating temp memory!\n");
-			return -ENOMEM;
-		}
-	}
-
-	user_mem = kmap_atomic(page);
-	if (!is_partial_io(bvec))
-		uncmem = user_mem;
-	clen = PAGE_SIZE;
-
-	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
-				ZS_MM_RO);
-
-	if (zram->table[index].size == PAGE_SIZE) {
-		memcpy(uncmem, cmem, PAGE_SIZE);
-		ret = LZO_E_OK;
-	} else {
-		ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
-						uncmem, &clen);
-	}
-
-	if (is_partial_io(bvec)) {
-		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
-			bvec->bv_len);
-		kfree(uncmem);
-	}
-
-	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
-	kunmap_atomic(user_mem);
-
-	/* Should NEVER happen. Return bio error if it does. */
-	if (unlikely(ret != LZO_E_OK)) {
-		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
-		zram_stat64_inc(zram, &zram->stats.failed_reads);
-		return ret;
-	}
-
-	flush_dcache_page(page);
-
-	return 0;
-}
-
-static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
-{
-	int ret;
+	int ret = LZO_E_OK;
 	size_t clen = PAGE_SIZE;
 	unsigned char *cmem;
 	unsigned long handle = zram->table[index].handle;
 
-	if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
+	if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
 		memset(mem, 0, PAGE_SIZE);
 		return 0;
 	}
 
 	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
 	if (zram->table[index].size == PAGE_SIZE)
 		memcpy(mem, cmem, PAGE_SIZE);
 	else
 		ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
 						mem, &clen);
 	zs_unmap_object(zram->mem_pool, handle);
@@ -279,6 +213,55 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
 	return 0;
 }
 
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+			  u32 index, int offset, struct bio *bio)
+{
+	int ret;
+	struct page *page;
+	unsigned char *user_mem, *uncmem = NULL;
+
+	page = bvec->bv_page;
+
+	if (unlikely(!zram->table[index].handle) ||
+			zram_test_flag(zram, index, ZRAM_ZERO)) {
+		handle_zero_page(bvec);
+		return 0;
+	}
+
+	user_mem = kmap_atomic(page);
+	if (is_partial_io(bvec))
+		/* Use a temporary buffer to decompress the page */
+		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	else
+		uncmem = user_mem;
+
+	if (!uncmem) {
+		pr_info("Unable to allocate temp memory\n");
+		ret = -ENOMEM;
+		goto out_cleanup;
+	}
+
+	ret = zram_decompress_page(zram, uncmem, index);
+	/* Should NEVER happen. Return bio error if it does. */
+	if (unlikely(ret != LZO_E_OK)) {
+		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
+		zram_stat64_inc(zram, &zram->stats.failed_reads);
+		goto out_cleanup;
+	}
+
+	if (is_partial_io(bvec))
+		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
+			bvec->bv_len);
+
+	flush_dcache_page(page);
+	ret = 0;
+out_cleanup:
+	kunmap_atomic(user_mem);
+	if (is_partial_io(bvec))
+		kfree(uncmem);
+	return ret;
+}
+
 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 			   int offset)
 {
@@ -302,7 +285,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 			ret = -ENOMEM;
 			goto out;
 		}
-		ret = zram_read_before_write(zram, uncmem, index);
+		ret = zram_decompress_page(zram, uncmem, index);
 		if (ret) {
 			kfree(uncmem);
 			goto out;
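Two aspects of the refactor are worth calling out. The new zram_bvec_read() funnels every exit through the single out_cleanup label, so the kunmap_atomic()/kfree() teardown is written once rather than duplicated in each error branch. And the write path now calls the same zram_decompress_page() helper that reads use: a partial write must first reconstruct the full page (the read-modify-write step that zram_read_before_write() used to perform) before the new bytes are copied in and the page is recompressed.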