mirror of https://github.com/torvalds/linux.git
Staging: zram: Refactor zram_read/write() functions
This patch refactors the code of the zram_read/write() functions. It does not remove much duplicate code by itself, but is mostly a helper for the third patch of this series (Staging: zram: allow partial page operations).
Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 6a587e835e
commit 8c921b2b43
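For orientation before the diff: the patch splits the per-bio read/write loops into per-segment helpers that return 0 on success or a negative error code, adds a small dispatcher, and moves the segment loop into a common request function that fails the whole bio on the first helper error. Below is a minimal standalone C sketch of that control flow. It is not the driver code: the demo_* names and types are illustrative stand-ins for the kernel's struct bio / struct bio_vec machinery, and only the calling contract mirrors the new zram_bvec_read()/zram_bvec_write()/zram_bvec_rw() functions.

#include <stdio.h>

/* Illustrative stand-ins for struct bio_vec / struct bio (not kernel types). */
struct demo_bvec { int data; };
struct demo_bio  { struct demo_bvec segs[4]; int nr_segs; };

enum { DEMO_READ, DEMO_WRITE };

/* Per-segment helpers return 0 on success or a negative error code,
 * mirroring the new zram_bvec_read()/zram_bvec_write() contract. */
static int demo_bvec_read(struct demo_bvec *bvec, unsigned int index)
{
	(void)bvec;
	printf("read  segment at index %u\n", index);
	return 0;
}

static int demo_bvec_write(struct demo_bvec *bvec, unsigned int index)
{
	printf("write segment at index %u\n", index);
	return bvec->data < 0 ? -1 : 0;	/* simulated failure path */
}

/* Mirrors zram_bvec_rw(): one dispatch point for both directions. */
static int demo_bvec_rw(struct demo_bvec *bvec, unsigned int index, int rw)
{
	if (rw == DEMO_READ)
		return demo_bvec_read(bvec, index);

	return demo_bvec_write(bvec, index);
}

/* Mirrors __zram_make_request(): walk the segments, abort on first error. */
static void demo_make_request(struct demo_bio *bio, int rw)
{
	unsigned int index = 0;
	int i;

	for (i = 0; i < bio->nr_segs; i++) {
		if (demo_bvec_rw(&bio->segs[i], index, rw) < 0) {
			printf("bio error\n");	/* stands in for bio_io_error(bio) */
			return;
		}
		index++;
	}
	printf("bio complete\n");	/* stands in for bio_endio(bio, 0) */
}

int main(void)
{
	struct demo_bio bio = { { {1}, {2}, {-1}, {4} }, 4 };

	demo_make_request(&bio, DEMO_WRITE);	/* fails at the third segment */
	demo_make_request(&bio, DEMO_READ);
	return 0;
}

Per the commit message, this per-segment decomposition is groundwork for the follow-up patch that allows partial page operations: once each bio_vec is handled by its own function, the loop no longer assumes a segment spans a whole page.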
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -203,196 +203,199 @@ static void handle_uncompressed_page(struct zram *zram,
 	flush_dcache_page(page);
 }
 
-static void zram_read(struct zram *zram, struct bio *bio)
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+			  u32 index, struct bio *bio)
 {
+	int ret;
+	size_t clen;
+	struct page *page;
+	struct zobj_header *zheader;
+	unsigned char *user_mem, *cmem;
 
-	int i;
-	u32 index;
-	struct bio_vec *bvec;
+	page = bvec->bv_page;
 
-	zram_stat64_inc(zram, &zram->stats.num_reads);
-	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
-
-	bio_for_each_segment(bvec, bio, i) {
-		int ret;
-		size_t clen;
-		struct page *page;
-		struct zobj_header *zheader;
-		unsigned char *user_mem, *cmem;
-
-		page = bvec->bv_page;
-
-		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
-			handle_zero_page(page);
-			index++;
-			continue;
-		}
-
-		/* Requested page is not present in compressed area */
-		if (unlikely(!zram->table[index].page)) {
-			pr_debug("Read before write: sector=%lu, size=%u",
-				(ulong)(bio->bi_sector), bio->bi_size);
-			handle_zero_page(page);
-			index++;
-			continue;
-		}
-
-		/* Page is stored uncompressed since it's incompressible */
-		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
-			handle_uncompressed_page(zram, page, index);
-			index++;
-			continue;
-		}
-
-		user_mem = kmap_atomic(page, KM_USER0);
-		clen = PAGE_SIZE;
-
-		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
-			zram->table[index].offset;
-
-		ret = lzo1x_decompress_safe(
-			cmem + sizeof(*zheader),
-			xv_get_object_size(cmem) - sizeof(*zheader),
-			user_mem, &clen);
-
-		kunmap_atomic(user_mem, KM_USER0);
-		kunmap_atomic(cmem, KM_USER1);
-
-		/* Should NEVER happen. Return bio error if it does. */
-		if (unlikely(ret != LZO_E_OK)) {
-			pr_err("Decompression failed! err=%d, page=%u\n",
-				ret, index);
-			zram_stat64_inc(zram, &zram->stats.failed_reads);
-			goto out;
-		}
-
-		flush_dcache_page(page);
-		index++;
+	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
+		handle_zero_page(page);
+		return 0;
 	}
 
-	set_bit(BIO_UPTODATE, &bio->bi_flags);
-	bio_endio(bio, 0);
-	return;
+	/* Requested page is not present in compressed area */
+	if (unlikely(!zram->table[index].page)) {
+		pr_debug("Read before write: sector=%lu, size=%u",
+			 (ulong)(bio->bi_sector), bio->bi_size);
+		handle_zero_page(page);
+		return 0;
+	}
 
-out:
-	bio_io_error(bio);
+	/* Page is stored uncompressed since it's incompressible */
+	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
+		handle_uncompressed_page(zram, page, index);
+		return 0;
+	}
+
+	user_mem = kmap_atomic(page, KM_USER0);
+	clen = PAGE_SIZE;
+
+	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+		zram->table[index].offset;
+
+	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
+				    xv_get_object_size(cmem) - sizeof(*zheader),
+				    user_mem, &clen);
+
+	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(cmem, KM_USER1);
+
+	/* Should NEVER happen. Return bio error if it does. */
+	if (unlikely(ret != LZO_E_OK)) {
+		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
+		zram_stat64_inc(zram, &zram->stats.failed_reads);
+		return ret;
+	}
+
+	flush_dcache_page(page);
+
+	return 0;
 }
 
-static void zram_write(struct zram *zram, struct bio *bio)
+static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
 {
+	int ret;
+	u32 offset;
+	size_t clen;
+	struct zobj_header *zheader;
+	struct page *page, *page_store;
+	unsigned char *user_mem, *cmem, *src;
+
+	page = bvec->bv_page;
+	src = zram->compress_buffer;
+
+	/*
+	 * System overwrites unused sectors. Free memory associated
+	 * with this sector now.
+	 */
+	if (zram->table[index].page ||
+	    zram_test_flag(zram, index, ZRAM_ZERO))
+		zram_free_page(zram, index);
+
+	mutex_lock(&zram->lock);
+
+	user_mem = kmap_atomic(page, KM_USER0);
+	if (page_zero_filled(user_mem)) {
+		kunmap_atomic(user_mem, KM_USER0);
+		mutex_unlock(&zram->lock);
+		zram_stat_inc(&zram->stats.pages_zero);
+		zram_set_flag(zram, index, ZRAM_ZERO);
+		return 0;
+	}
+
+	ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
+			       zram->compress_workmem);
+
+	kunmap_atomic(user_mem, KM_USER0);
+
+	if (unlikely(ret != LZO_E_OK)) {
+		mutex_unlock(&zram->lock);
+		pr_err("Compression failed! err=%d\n", ret);
+		zram_stat64_inc(zram, &zram->stats.failed_writes);
+		return ret;
+	}
+
+	/*
+	 * Page is incompressible. Store it as-is (uncompressed)
+	 * since we do not want to return too many disk write
+	 * errors which has side effect of hanging the system.
+	 */
+	if (unlikely(clen > max_zpage_size)) {
+		clen = PAGE_SIZE;
+		page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
+		if (unlikely(!page_store)) {
+			mutex_unlock(&zram->lock);
+			pr_info("Error allocating memory for "
+				"incompressible page: %u\n", index);
+			zram_stat64_inc(zram, &zram->stats.failed_writes);
+			return -ENOMEM;
+		}
+
+		offset = 0;
+		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
+		zram_stat_inc(&zram->stats.pages_expand);
+		zram->table[index].page = page_store;
+		src = kmap_atomic(page, KM_USER0);
+		goto memstore;
+	}
+
+	if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
+		      &zram->table[index].page, &offset,
+		      GFP_NOIO | __GFP_HIGHMEM)) {
+		mutex_unlock(&zram->lock);
+		pr_info("Error allocating memory for compressed "
+			"page: %u, size=%zu\n", index, clen);
+		zram_stat64_inc(zram, &zram->stats.failed_writes);
+		return -ENOMEM;
+	}
+
+memstore:
+	zram->table[index].offset = offset;
+
+	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+		zram->table[index].offset;
+
+#if 0
+	/* Back-reference needed for memory defragmentation */
+	if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
+		zheader = (struct zobj_header *)cmem;
+		zheader->table_idx = index;
+		cmem += sizeof(*zheader);
+	}
+#endif
+
+	memcpy(cmem, src, clen);
+
+	kunmap_atomic(cmem, KM_USER1);
+	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
+		kunmap_atomic(src, KM_USER0);
+
+	/* Update stats */
+	zram_stat64_add(zram, &zram->stats.compr_size, clen);
+	zram_stat_inc(&zram->stats.pages_stored);
+	if (clen <= PAGE_SIZE / 2)
+		zram_stat_inc(&zram->stats.good_compress);
+
+	mutex_unlock(&zram->lock);
+
+	return 0;
+}
+
+static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
+			struct bio *bio, int rw)
+{
+	if (rw == READ)
+		return zram_bvec_read(zram, bvec, index, bio);
+
+	return zram_bvec_write(zram, bvec, index);
+}
+
+static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
+{
 	int i;
 	u32 index;
 	struct bio_vec *bvec;
 
-	zram_stat64_inc(zram, &zram->stats.num_writes);
+	switch (rw) {
+	case READ:
+		zram_stat64_inc(zram, &zram->stats.num_reads);
+		break;
+	case WRITE:
+		zram_stat64_inc(zram, &zram->stats.num_writes);
+		break;
+	}
+
 	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
 
 	bio_for_each_segment(bvec, bio, i) {
-		int ret;
-		u32 offset;
-		size_t clen;
-		struct zobj_header *zheader;
-		struct page *page, *page_store;
-		unsigned char *user_mem, *cmem, *src;
-
-		page = bvec->bv_page;
-		src = zram->compress_buffer;
-
-		/*
-		 * System overwrites unused sectors. Free memory associated
-		 * with this sector now.
-		 */
-		if (zram->table[index].page ||
-				zram_test_flag(zram, index, ZRAM_ZERO))
-			zram_free_page(zram, index);
-
-		mutex_lock(&zram->lock);
-
-		user_mem = kmap_atomic(page, KM_USER0);
-		if (page_zero_filled(user_mem)) {
-			kunmap_atomic(user_mem, KM_USER0);
-			mutex_unlock(&zram->lock);
-			zram_stat_inc(&zram->stats.pages_zero);
-			zram_set_flag(zram, index, ZRAM_ZERO);
-			index++;
-			continue;
-		}
-
-		ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
-					zram->compress_workmem);
-
-		kunmap_atomic(user_mem, KM_USER0);
-
-		if (unlikely(ret != LZO_E_OK)) {
-			mutex_unlock(&zram->lock);
-			pr_err("Compression failed! err=%d\n", ret);
-			zram_stat64_inc(zram, &zram->stats.failed_writes);
+		if (zram_bvec_rw(zram, bvec, index, bio, rw) < 0)
 			goto out;
-		}
-
-		/*
-		 * Page is incompressible. Store it as-is (uncompressed)
-		 * since we do not want to return too many disk write
-		 * errors which has side effect of hanging the system.
-		 */
-		if (unlikely(clen > max_zpage_size)) {
-			clen = PAGE_SIZE;
-			page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
-			if (unlikely(!page_store)) {
-				mutex_unlock(&zram->lock);
-				pr_info("Error allocating memory for "
-					"incompressible page: %u\n", index);
-				zram_stat64_inc(zram,
-					&zram->stats.failed_writes);
-				goto out;
-			}
-
-			offset = 0;
-			zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
-			zram_stat_inc(&zram->stats.pages_expand);
-			zram->table[index].page = page_store;
-			src = kmap_atomic(page, KM_USER0);
-			goto memstore;
-		}
-
-		if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
-				&zram->table[index].page, &offset,
-				GFP_NOIO | __GFP_HIGHMEM)) {
-			mutex_unlock(&zram->lock);
-			pr_info("Error allocating memory for compressed "
-				"page: %u, size=%zu\n", index, clen);
-			zram_stat64_inc(zram, &zram->stats.failed_writes);
-			goto out;
-		}
-
-memstore:
-		zram->table[index].offset = offset;
-
-		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
-			zram->table[index].offset;
-
-#if 0
-		/* Back-reference needed for memory defragmentation */
-		if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
-			zheader = (struct zobj_header *)cmem;
-			zheader->table_idx = index;
-			cmem += sizeof(*zheader);
-		}
-#endif
-
-		memcpy(cmem, src, clen);
-
-		kunmap_atomic(cmem, KM_USER1);
-		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
-			kunmap_atomic(src, KM_USER0);
-
-		/* Update stats */
-		zram_stat64_add(zram, &zram->stats.compr_size, clen);
-		zram_stat_inc(&zram->stats.pages_stored);
-		if (clen <= PAGE_SIZE / 2)
-			zram_stat_inc(&zram->stats.good_compress);
-
-		mutex_unlock(&zram->lock);
 		index++;
 	}
 
@@ -439,15 +442,7 @@ static int zram_make_request(struct request_queue *queue, struct bio *bio)
 		return 0;
 	}
 
-	switch (bio_data_dir(bio)) {
-	case READ:
-		zram_read(zram, bio);
-		break;
-
-	case WRITE:
-		zram_write(zram, bio);
-		break;
-	}
+	__zram_make_request(zram, bio, bio_data_dir(bio));
 
 	return 0;
 }