commit d2d5e762c8
Currently, a single rwlock (tb_lock) protects concurrent access to the
whole zram meta table. However, in the actual access pattern there is
only a small chance that upper layers touch the same table[index]
concurrently, so the current lock granularity is too coarse.
The idea of this optimization is to change the lock granularity from the
whole meta table to a per-entry lock (table -> table[index]), so that
concurrent access to the same table[index] is still protected while
unrelated entries can be accessed with maximum concurrency.
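As a rough sketch of the change (illustrative only, not the patch itself;
the field and flag names table[index].value and ZRAM_ACCESS come from the
driver code below):

    /* before: one rwlock serializes access to every entry */
    read_lock(&meta->tb_lock);
    handle = meta->table[index].handle;
    read_unlock(&meta->tb_lock);

    /* after: lock only the entry actually being accessed */
    bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
    handle = meta->table[index].handle;
    bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);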
With this in mind, several kinds of locks that could serve as a per-entry
lock were tested and compared:
Test environment:
x86-64 Intel Core2 Q8400, system memory 4GB, Ubuntu 12.04,
kernel v3.15.0-rc3 as base, zram with 4 max_comp_streams LZO.
iozone test:
iozone -t 4 -R -r 16K -s 200M -I +Z
(1GB zram with ext4 filesystem, take the average of 10 tests, KB/s)
Test             base         CAS    spinlock      rwlock  bit_spinlock
------------------------------------------------------------------------
Initial write  1381094     1425435     1422860     1423075     1421521
Rewrite        1529479     1641199     1668762     1672855     1654910
Read           8468009    11324979    11305569    11117273    10997202
Re-read        8467476    11260914    11248059    11145336    10906486
Reverse Read   6821393     8106334     8282174     8279195     8109186
Stride read    7191093     8994306     9153982     8961224     9004434
Random read    7156353     8957932     9167098     8980465     8940476
Mixed workload 4172747     5680814     5927825     5489578     5972253
Random write   1483044     1605588     1594329     1600453     1596010
Pwrite         1276644     1303108     1311612     1314228     1300960
Pread          4324337     4632869     4618386     4457870     4500166
To increase the likelihood of concurrent access to the same table[index],
zram was configured with a small disksize (10MB) and the threads were run
with a large loop count.
fio test:
fio --bs=32k --randrepeat=1 --randseed=100 --refill_buffers
--scramble_buffers=1 --direct=1 --loops=3000 --numjobs=4
--filename=/dev/zram0 --name=seq-write --rw=write --stonewall
--name=seq-read --rw=read --stonewall --name=seq-readwrite
--rw=rw --stonewall --name=rand-readwrite --rw=randrw --stonewall
(10MB zram raw block device, take the average of 10 tests, KB/s)
Test             base         CAS    spinlock      rwlock  bit_spinlock
------------------------------------------------------------------------
seq-write       933789      999357     1003298      995961     1001958
seq-read       5634130     6577930     6380861     6243912     6230006
seq-rw         1405687     1638117     1640256     1633903     1634459
rand-rw        1386119     1614664     1617211     1609267     1612471
All of the candidate locks perform better than the base, but it is hard to
say which one is the most appropriate. On the other hand, zram is mostly
used on small embedded systems, so we do not want to increase the memory
footprint.
This patch therefore picks the bit_spinlock method and packs the object
size and the page flags into a single unsigned long table.value, so that
no extra memory overhead is added on either 32-bit or 64-bit systems.
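A rough sketch of the table.value layout (ZRAM_FLAG_SHIFT and ZRAM_ACCESS
are the names used in the driver below; the exact bit positions are defined
in zram_drv.h):

    /*
     * table[index].value:
     *   bits [0, ZRAM_FLAG_SHIFT)   compressed object size
     *   bits [ZRAM_FLAG_SHIFT, ...) page flags, including the ZRAM_ACCESS
     *                               bit that serves as the per-entry lock
     */
    size  = meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
    flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;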
Finally, even though the different locks perform differently, the
difference can be ignored in practice: if zram is used as a swap device,
the swap subsystem already prevents concurrent access to the same swap
slot; if zram is used as a block device with a filesystem on top of it,
the upper filesystem and the page cache mostly prevent concurrent access
to the same block.
Acked-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Reviewed-by: Davidlohr Bueso <davidlohr@hp.com>
Signed-off-by: Weijie Yang <weijie.yang@samsung.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

#define ZRAM_ATTR_RO(name) \
static ssize_t zram_attr_##name##_show(struct device *d, \
                struct device_attribute *attr, char *b) \
{ \
        struct zram *zram = dev_to_zram(d); \
        return scnprintf(b, PAGE_SIZE, "%llu\n", \
                (u64)atomic64_read(&zram->stats.name)); \
} \
static struct device_attribute dev_attr_##name = \
        __ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);

static inline int init_done(struct zram *zram)
{
        return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
        return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u32 val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = init_done(zram);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return scnprintf(buf, PAGE_SIZE, "%llu\n",
                (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);
        struct zram_meta *meta = zram->meta;

        down_read(&zram->init_lock);
        if (init_done(zram))
                val = zs_get_total_size_bytes(meta->mem_pool);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}

static ssize_t max_comp_streams_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = zram->max_comp_streams;
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t max_comp_streams_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int num;
        struct zram *zram = dev_to_zram(dev);
        int ret;

        ret = kstrtoint(buf, 0, &num);
        if (ret < 0)
                return ret;
        if (num < 1)
                return -EINVAL;

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                if (!zcomp_set_max_streams(zram->comp, num)) {
                        pr_info("Cannot change max compression streams\n");
                        ret = -EINVAL;
                        goto out;
                }
        }

        zram->max_comp_streams = num;
        ret = len;
out:
        up_write(&zram->init_lock);
        return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        size_t sz;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        sz = zcomp_available_show(zram->compressor, buf);
        up_read(&zram->init_lock);

        return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);
        down_write(&zram->init_lock);
        if (init_done(zram)) {
                up_write(&zram->init_lock);
                pr_info("Can't change algorithm for initialized device\n");
                return -EBUSY;
        }
        strlcpy(zram->compressor, buf, sizeof(zram->compressor));
        up_write(&zram->init_lock);
        return len;
}

/* flag operations require holding the table entry's bit_spinlock (ZRAM_ACCESS) */
static int zram_test_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].value &= ~BIT(flag);
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
        return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
                        u32 index, size_t size)
{
        unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

        meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
        u64 start, end, bound;

        /* unaligned request */
        if (unlikely(bio->bi_iter.bi_sector &
                        (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return 0;
        if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return 0;

        start = bio->bi_iter.bi_sector;
        end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))
                return 0;

        /* I/O request is valid */
        return 1;
}

static void zram_meta_free(struct zram_meta *meta)
{
        zs_destroy_pool(meta->mem_pool);
        vfree(meta->table);
        kfree(meta);
}

static struct zram_meta *zram_meta_alloc(u64 disksize)
{
        size_t num_pages;
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
        if (!meta)
                goto out;

        num_pages = disksize >> PAGE_SHIFT;
        meta->table = vzalloc(num_pages * sizeof(*meta->table));
        if (!meta->table) {
                pr_err("Error allocating zram address table\n");
                goto free_meta;
        }

        meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
                goto free_table;
        }

        return meta;

free_table:
        vfree(meta->table);
free_meta:
        kfree(meta);
        meta = NULL;
out:
        return meta;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        if (*offset + bvec->bv_len >= PAGE_SIZE)
                (*index)++;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page);
        if (is_partial_io(bvec))
                memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
        else
                clear_page(user_mem);
        kunmap_atomic(user_mem);

        flush_dcache_page(page);
}

/*
 * To protect concurrent access to the same index entry, the caller
 * should hold this table index entry's bit_spinlock, which indicates
 * that the entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;

        if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (zram_test_flag(meta, index, ZRAM_ZERO)) {
                        zram_clear_flag(meta, index, ZRAM_ZERO);
                        atomic64_dec(&zram->stats.zero_pages);
                }
                return;
        }

        zs_free(meta->mem_pool, handle);

        atomic64_sub(zram_get_obj_size(meta, index),
                        &zram->stats.compr_data_size);
        atomic64_dec(&zram->stats.pages_stored);

        meta->table[index].handle = 0;
        zram_set_obj_size(meta, index, 0);
}

static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
        int ret = 0;
        unsigned char *cmem;
        struct zram_meta *meta = zram->meta;
        unsigned long handle;
        size_t size;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        handle = meta->table[index].handle;
        size = zram_get_obj_size(meta, index);

        if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                clear_page(mem);
                return 0;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        if (size == PAGE_SIZE)
                copy_page(mem, cmem);
        else
                ret = zcomp_decompress(zram->comp, cmem, size, mem);
        zs_unmap_object(meta->mem_pool, handle);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                atomic64_inc(&zram->stats.failed_reads);
                return ret;
        }

        return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                          u32 index, int offset, struct bio *bio)
{
        int ret;
        struct page *page;
        unsigned char *user_mem, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        page = bvec->bv_page;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        if (unlikely(!meta->table[index].handle) ||
                        zram_test_flag(meta, index, ZRAM_ZERO)) {
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                handle_zero_page(bvec);
                return 0;
        }
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        if (is_partial_io(bvec))
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

        user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;

        if (!uncmem) {
                pr_info("Unable to allocate temp memory\n");
                ret = -ENOMEM;
                goto out_cleanup;
        }

        ret = zram_decompress_page(zram, uncmem, index);
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret))
                goto out_cleanup;

        if (is_partial_io(bvec))
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                                bvec->bv_len);

        flush_dcache_page(page);
        ret = 0;
out_cleanup:
        kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
{
        int ret = 0;
        size_t clen;
        unsigned long handle;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        struct zcomp_strm *zstrm;
        bool locked = false;

        page = bvec->bv_page;
        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
                if (!uncmem) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
                if (ret)
                        goto out;
        }

        zstrm = zcomp_strm_find(zram->comp);
        locked = true;
        user_mem = kmap_atomic(page);

        if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                                bvec->bv_len);
                kunmap_atomic(user_mem);
                user_mem = NULL;
        } else {
                uncmem = user_mem;
        }

        if (page_zero_filled(uncmem)) {
                kunmap_atomic(user_mem);
                /* Free memory associated with this sector now. */
                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                zram_free_page(zram, index);
                zram_set_flag(meta, index, ZRAM_ZERO);
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

                atomic64_inc(&zram->stats.zero_pages);
                ret = 0;
                goto out;
        }

        ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
        if (!is_partial_io(bvec)) {
                kunmap_atomic(user_mem);
                user_mem = NULL;
                uncmem = NULL;
        }

        if (unlikely(ret)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }
        src = zstrm->buffer;
        if (unlikely(clen > max_zpage_size)) {
                clen = PAGE_SIZE;
                if (is_partial_io(bvec))
                        src = uncmem;
        }

        handle = zs_malloc(meta->mem_pool, clen);
        if (!handle) {
                pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
                        index, clen);
                ret = -ENOMEM;
                goto out;
        }
        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
                src = kmap_atomic(page);
                copy_page(cmem, src);
                kunmap_atomic(src);
        } else {
                memcpy(cmem, src, clen);
        }

        zcomp_strm_release(zram->comp, zstrm);
        locked = false;
        zs_unmap_object(meta->mem_pool, handle);

        /*
         * Free memory associated with this sector
         * before overwriting unused sectors.
         */
        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        zram_free_page(zram, index);

        meta->table[index].handle = handle;
        zram_set_obj_size(meta, index, clen);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        /* Update stats */
        atomic64_add(clen, &zram->stats.compr_data_size);
        atomic64_inc(&zram->stats.pages_stored);
out:
        if (locked)
                zcomp_strm_release(zram->comp, zstrm);
        if (is_partial_io(bvec))
                kfree(uncmem);
        if (ret)
                atomic64_inc(&zram->stats.failed_writes);
        return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, struct bio *bio)
{
        int ret;
        int rw = bio_data_dir(bio);

        if (rw == READ) {
                atomic64_inc(&zram->stats.num_reads);
                ret = zram_bvec_read(zram, bvec, index, offset, bio);
        } else {
                atomic64_inc(&zram->stats.num_writes);
                ret = zram_bvec_write(zram, bvec, index, offset);
        }

        return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
                             int offset, struct bio *bio)
{
        size_t n = bio->bi_iter.bi_size;
        struct zram_meta *meta = zram->meta;

        /*
         * zram manages data in physical block size units. Because logical block
         * size isn't identical with physical block size on some arch, we
         * could get a discard request pointing to a specific offset within a
         * certain physical block. Although we can handle this request by
         * reading that physical block and decompressing and partially zeroing
         * and re-compressing and then re-storing it, this isn't reasonable
         * because our intent with a discard request is to save memory. So
         * skipping this logical block is appropriate here.
         */
        if (offset) {
                if (n <= (PAGE_SIZE - offset))
                        return;

                n -= (PAGE_SIZE - offset);
                index++;
        }

        while (n >= PAGE_SIZE) {
                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                zram_free_page(zram, index);
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                index++;
                n -= PAGE_SIZE;
        }
}

static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
        size_t index;
        struct zram_meta *meta;

        down_write(&zram->init_lock);
        if (!init_done(zram)) {
                up_write(&zram->init_lock);
                return;
        }

        meta = zram->meta;
        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
                unsigned long handle = meta->table[index].handle;
                if (!handle)
                        continue;

                zs_free(meta->mem_pool, handle);
        }

        zcomp_destroy(zram->comp);
        zram->max_comp_streams = 1;

        zram_meta_free(zram->meta);
        zram->meta = NULL;
        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));

        zram->disksize = 0;
        if (reset_capacity)
                set_capacity(zram->disk, 0);

        up_write(&zram->init_lock);

        /*
         * Revalidate disk out of the init_lock to avoid lockdep splat.
         * It's okay because disk's capacity is protected by init_lock
         * so that revalidate_disk always sees up-to-date capacity.
         */
        if (reset_capacity)
                revalidate_disk(zram->disk);
}

static ssize_t disksize_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 disksize;
        struct zcomp *comp;
        struct zram_meta *meta;
        struct zram *zram = dev_to_zram(dev);
        int err;

        disksize = memparse(buf, NULL);
        if (!disksize)
                return -EINVAL;

        disksize = PAGE_ALIGN(disksize);
        meta = zram_meta_alloc(disksize);
        if (!meta)
                return -ENOMEM;

        comp = zcomp_create(zram->compressor, zram->max_comp_streams);
        if (IS_ERR(comp)) {
                pr_info("Cannot initialise %s compressing backend\n",
                                zram->compressor);
                err = PTR_ERR(comp);
                goto out_free_meta;
        }

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                pr_info("Cannot change disksize for initialized device\n");
                err = -EBUSY;
                goto out_destroy_comp;
        }

        zram->meta = meta;
        zram->comp = comp;
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
        up_write(&zram->init_lock);

        /*
         * Revalidate disk out of the init_lock to avoid lockdep splat.
         * It's okay because disk's capacity is protected by init_lock
         * so that revalidate_disk always sees up-to-date capacity.
         */
        revalidate_disk(zram->disk);

        return len;

out_destroy_comp:
        up_write(&zram->init_lock);
        zcomp_destroy(comp);
out_free_meta:
        zram_meta_free(meta);
        return err;
}

static ssize_t reset_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int ret;
        unsigned short do_reset;
        struct zram *zram;
        struct block_device *bdev;

        zram = dev_to_zram(dev);
        bdev = bdget_disk(zram->disk, 0);

        if (!bdev)
                return -ENOMEM;

        /* Do not reset an active device! */
        if (bdev->bd_holders) {
                ret = -EBUSY;
                goto out;
        }

        ret = kstrtou16(buf, 10, &do_reset);
        if (ret)
                goto out;

        if (!do_reset) {
                ret = -EINVAL;
                goto out;
        }

        /* Make sure all pending I/O is finished */
        fsync_bdev(bdev);
        bdput(bdev);

        zram_reset_device(zram, true);
        return len;

out:
        bdput(bdev);
        return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
        int offset;
        u32 index;
        struct bio_vec bvec;
        struct bvec_iter iter;

        index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_iter.bi_sector &
                        (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        if (unlikely(bio->bi_rw & REQ_DISCARD)) {
                zram_bio_discard(zram, index, offset, bio);
                bio_endio(bio, 0);
                return;
        }

        bio_for_each_segment(bvec, bio, iter) {
                int max_transfer_size = PAGE_SIZE - offset;

                if (bvec.bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only make operation on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;

                        bv.bv_page = bvec.bv_page;
                        bv.bv_len = max_transfer_size;
                        bv.bv_offset = bvec.bv_offset;

                        if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
                                goto out;

                        bv.bv_len = bvec.bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
                                goto out;
                } else
                        if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
                                goto out;

                update_position(&index, &offset, &bvec);
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return;

out:
        bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        down_read(&zram->init_lock);
        if (unlikely(!init_done(zram)))
                goto error;

        if (!valid_io_request(zram, bio)) {
                atomic64_inc(&zram->stats.invalid_io);
                goto error;
        }

        __zram_make_request(zram, bio);
        up_read(&zram->init_lock);

        return;

error:
        up_read(&zram->init_lock);
        bio_io_error(bio);
}

static void zram_slot_free_notify(struct block_device *bdev,
                                  unsigned long index)
{
        struct zram *zram;
        struct zram_meta *meta;

        zram = bdev->bd_disk->private_data;
        meta = zram->meta;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        zram_free_page(zram, index);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
        atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
        .swap_slot_free_notify = zram_slot_free_notify,
        .owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
                disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
                max_comp_streams_show, max_comp_streams_store);
static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
                comp_algorithm_show, comp_algorithm_store);

ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static struct attribute *zram_disk_attrs[] = {
        &dev_attr_disksize.attr,
        &dev_attr_initstate.attr,
        &dev_attr_reset.attr,
        &dev_attr_num_reads.attr,
        &dev_attr_num_writes.attr,
        &dev_attr_failed_reads.attr,
        &dev_attr_failed_writes.attr,
        &dev_attr_invalid_io.attr,
        &dev_attr_notify_free.attr,
        &dev_attr_zero_pages.attr,
        &dev_attr_orig_data_size.attr,
        &dev_attr_compr_data_size.attr,
        &dev_attr_mem_used_total.attr,
        &dev_attr_max_comp_streams.attr,
        &dev_attr_comp_algorithm.attr,
        NULL,
};

static struct attribute_group zram_disk_attr_group = {
        .attrs = zram_disk_attrs,
};

static int create_device(struct zram *zram, int device_id)
{
        int ret = -ENOMEM;

        init_rwsem(&zram->init_lock);

        zram->queue = blk_alloc_queue(GFP_KERNEL);
        if (!zram->queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                goto out;
        }

        blk_queue_make_request(zram->queue, zram_make_request);
        zram->queue->queuedata = zram;

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                pr_warn("Error allocating disk structure for device %d\n",
                        device_id);
                goto out_free_queue;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = zram->queue;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);
        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZE sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
        zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
        zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
        /*
         * zram_bio_discard() will clear all logical blocks if logical block
         * size is identical with physical block size (PAGE_SIZE). But if it is
         * different, we will skip discarding some parts of logical blocks in
         * the part of the request range which isn't aligned to physical block
         * size. So we can't ensure that all discarded logical blocks are
         * zeroed.
         */
        if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
                zram->disk->queue->limits.discard_zeroes_data = 1;
        else
                zram->disk->queue->limits.discard_zeroes_data = 0;
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);
        if (ret < 0) {
                pr_warn("Error creating sysfs group");
                goto out_free_disk;
        }
        strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
        zram->meta = NULL;
        zram->max_comp_streams = 1;
        return 0;

out_free_disk:
        del_gendisk(zram->disk);
        put_disk(zram->disk);
out_free_queue:
        blk_cleanup_queue(zram->queue);
out:
        return ret;
}

static void destroy_device(struct zram *zram)
{
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        del_gendisk(zram->disk);
        put_disk(zram->disk);

        blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
        int ret, dev_id;

        if (num_devices > max_num_devices) {
                pr_warn("Invalid value for num_devices: %u\n",
                                num_devices);
                ret = -EINVAL;
                goto out;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_warn("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }

        /* Allocate the device array and initialize each one */
        zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
        if (!zram_devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&zram_devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }

        pr_info("Created %u device(s) ...\n", num_devices);

        return 0;

free_devices:
        while (dev_id)
                destroy_device(&zram_devices[--dev_id]);
        kfree(zram_devices);
unregister:
        unregister_blkdev(zram_major, "zram");
out:
        return ret;
}

static void __exit zram_exit(void)
{
        int i;
        struct zram *zram;

        for (i = 0; i < num_devices; i++) {
                zram = &zram_devices[i];

                destroy_device(zram);
                /*
                 * Shouldn't access zram->disk after destroy_device
                 * because destroy_device already released zram->disk.
                 */
                zram_reset_device(zram, false);
        }

        unregister_blkdev(zram_major, "zram");

        kfree(zram_devices);
        pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");