/*
 * loop.h
 *
 * Written by Theodore Ts'o, 3/29/93.
 *
 * Copyright 1993 by Theodore Ts'o. Redistribution of this file is
 * permitted under the GNU General Public License.
 */
#ifndef _LINUX_LOOP_H
#define _LINUX_LOOP_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <uapi/linux/loop.h>

/* Possible states of device */
enum {
	Lo_unbound,
	Lo_bound,
	Lo_rundown,
};
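/*
 * Illustrative note (an assumption based on how drivers/block/loop.c
 * uses these states, not text from the original header): Lo_unbound
 * means no backing file is attached, LOOP_SET_FD moves a device to
 * Lo_bound, and Lo_rundown is the transient state while LOOP_CLR_FD
 * tears the binding down.
 */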

struct loop_func_table;

struct loop_device {
	int		lo_number;
	atomic_t	lo_refcnt;
	loff_t		lo_offset;
	loff_t		lo_sizelimit;
	int		lo_flags;
	int		(*transfer)(struct loop_device *, int cmd,
				    struct page *raw_page, unsigned raw_off,
				    struct page *loop_page, unsigned loop_off,
				    int size, sector_t real_block);
	char		lo_file_name[LO_NAME_SIZE];
	char		lo_crypt_name[LO_NAME_SIZE];
	char		lo_encrypt_key[LO_KEY_SIZE];
	int		lo_encrypt_key_size;
	struct loop_func_table *lo_encryption;
	__u32		lo_init[2];
	kuid_t		lo_key_owner;	/* Who set the key */
	int		(*ioctl)(struct loop_device *, int cmd,
				 unsigned long arg);

	struct file *	lo_backing_file;
	struct block_device *lo_device;
	unsigned	lo_blocksize;
	void		*key_data;

	gfp_t		old_gfp_mask;

	spinlock_t		lo_lock;
	struct workqueue_struct *wq;
	struct list_head	write_cmd_head;
	struct work_struct	write_work;
	bool			write_started;
	int			lo_state;
	struct mutex		lo_ctl_mutex;

	struct request_queue	*lo_queue;
	struct blk_mq_tag_set	tag_set;
	struct gendisk		*lo_disk;
};
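/*
 * Design note, distilled from the blk-mq conversion commit: read
 * requests are dispatched concurrently, each through the per-command
 * read_work below, while writes to the same backing file usually run
 * exclusively, so they are funneled through the single write_cmd_head/
 * write_work pair above to avoid extra context switches, possible lock
 * contention, and work-scheduling cost.
 */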
struct loop_cmd {
	struct work_struct read_work;
	struct request *rq;
	struct list_head list;
};
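/*
 * Illustrative sketch (an assumption about the matching loop.c, not
 * part of this header): one struct loop_cmd is allocated alongside
 * each request by setting tag_set.cmd_size = sizeof(struct loop_cmd),
 * so the driver's ->queue_rq() can recover it with blk_mq_rq_to_pdu():
 *
 *	static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 *				 const struct blk_mq_queue_data *bd)
 *	{
 *		struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
 *		struct loop_device *lo = bd->rq->q->queuedata;
 *
 *		blk_mq_start_request(bd->rq);
 *		queue_work(lo->wq, &cmd->read_work);
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 */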
/* Support for loadable transfer modules */
struct loop_func_table {
	int number;	/* filter type */
	int (*transfer)(struct loop_device *lo, int cmd,
			struct page *raw_page, unsigned raw_off,
			struct page *loop_page, unsigned loop_off,
			int size, sector_t real_block);
	int (*init)(struct loop_device *, const struct loop_info64 *);
	/* release is called from loop_unregister_transfer or clr_fd */
	int (*release)(struct loop_device *);
	int (*ioctl)(struct loop_device *, int cmd, unsigned long arg);
	struct module *owner;
};

int loop_register_transfer(struct loop_func_table *funcs);
int loop_unregister_transfer(int number);
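/*
 * Illustrative sketch (hypothetical names, a sketch rather than code
 * from the kernel tree): a transfer module fills in a loop_func_table
 * and registers it from its module init hook, here reusing the
 * LO_CRYPT_XOR filter-type slot from <uapi/linux/loop.h>:
 *
 *	static struct loop_func_table example_funcs = {
 *		.number   = LO_CRYPT_XOR,
 *		.transfer = example_transfer,
 *		.owner    = THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return loop_register_transfer(&example_funcs);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		loop_unregister_transfer(LO_CRYPT_XOR);
 *	}
 */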

#endif