forked from Minki/linux
nvmet: add buffered I/O support for file backed ns
Add a new "buffered_io" attribute, which disables direct I/O and thus enables page-cache-based caching when set. The attribute can only be changed while the namespace is disabled, as the file has to be reopened for the change to take effect. The possibly blocking reads/writes are deferred to a newly introduced global workqueue. Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com> Signed-off-by: Christoph Hellwig <hch@lst.de>
This commit is contained in:
parent
0866bf0c37
commit
55eb942eda
@ -407,11 +407,40 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item,
|
||||
|
||||
CONFIGFS_ATTR(nvmet_ns_, enable);
|
||||
|
||||
static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
|
||||
{
|
||||
return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
|
||||
}
|
||||
|
||||
/*
 * Update the namespace's buffered_io flag from a boolean string.
 *
 * The flag may only change while the namespace is disabled, because the
 * backing file must be reopened (with or without O_DIRECT) for the new
 * value to take effect.  Returns @count on success, -EINVAL on a bad
 * boolean or if the namespace is currently enabled.
 */
static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	ssize_t ret = count;
	bool val;

	if (strtobool(page, &val))
		return -EINVAL;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		pr_err("disable ns before setting buffered_io value.\n");
		ret = -EINVAL;
	} else {
		ns->buffered_io = val;
	}
	mutex_unlock(&ns->subsys->lock);

	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, buffered_io);
|
||||
|
||||
/* configfs attributes exposed under each namespace directory. */
static struct configfs_attribute *nvmet_ns_attrs[] = {
	&nvmet_ns_attr_device_path,
	&nvmet_ns_attr_device_nguid,
	&nvmet_ns_attr_device_uuid,
	&nvmet_ns_attr_enable,
	&nvmet_ns_attr_buffered_io,
	NULL,	/* sentinel */
};
|
||||
|
||||
|
@ -18,6 +18,7 @@
|
||||
|
||||
#include "nvmet.h"
|
||||
|
||||
struct workqueue_struct *buffered_io_wq;
|
||||
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
|
||||
static DEFINE_IDA(cntlid_ida);
|
||||
|
||||
@ -437,6 +438,7 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
|
||||
ns->nsid = nsid;
|
||||
ns->subsys = subsys;
|
||||
uuid_gen(&ns->uuid);
|
||||
ns->buffered_io = false;
|
||||
|
||||
return ns;
|
||||
}
|
||||
@ -1109,6 +1111,12 @@ static int __init nvmet_init(void)
|
||||
{
|
||||
int error;
|
||||
|
||||
buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
|
||||
WQ_MEM_RECLAIM, 0);
|
||||
if (!buffered_io_wq) {
|
||||
error = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
error = nvmet_init_discovery();
|
||||
if (error)
|
||||
goto out;
|
||||
@ -1129,6 +1137,7 @@ static void __exit nvmet_exit(void)
|
||||
nvmet_exit_configfs();
|
||||
nvmet_exit_discovery();
|
||||
ida_destroy(&cntlid_ida);
|
||||
destroy_workqueue(buffered_io_wq);
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
|
||||
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
|
||||
|
@ -16,6 +16,8 @@
|
||||
void nvmet_file_ns_disable(struct nvmet_ns *ns)
|
||||
{
|
||||
if (ns->file) {
|
||||
if (ns->buffered_io)
|
||||
flush_workqueue(buffered_io_wq);
|
||||
mempool_destroy(ns->bvec_pool);
|
||||
ns->bvec_pool = NULL;
|
||||
kmem_cache_destroy(ns->bvec_cache);
|
||||
@ -27,11 +29,14 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
|
||||
|
||||
int nvmet_file_ns_enable(struct nvmet_ns *ns)
|
||||
{
|
||||
int ret;
|
||||
int flags = O_RDWR | O_LARGEFILE;
|
||||
struct kstat stat;
|
||||
int ret;
|
||||
|
||||
ns->file = filp_open(ns->device_path,
|
||||
O_RDWR | O_LARGEFILE | O_DIRECT, 0);
|
||||
if (!ns->buffered_io)
|
||||
flags |= O_DIRECT;
|
||||
|
||||
ns->file = filp_open(ns->device_path, flags, 0);
|
||||
if (IS_ERR(ns->file)) {
|
||||
pr_err("failed to open file %s: (%ld)\n",
|
||||
ns->device_path, PTR_ERR(ns->file));
|
||||
@ -100,7 +105,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
|
||||
|
||||
iocb->ki_pos = pos;
|
||||
iocb->ki_filp = req->ns->file;
|
||||
iocb->ki_flags = IOCB_DIRECT | ki_flags;
|
||||
iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);
|
||||
|
||||
ret = call_iter(iocb, &iter);
|
||||
|
||||
@ -189,6 +194,19 @@ out:
|
||||
nvmet_file_submit_bvec(req, pos, bv_cnt, total_len);
|
||||
}
|
||||
|
||||
static void nvmet_file_buffered_io_work(struct work_struct *w)
|
||||
{
|
||||
struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
|
||||
|
||||
nvmet_file_execute_rw(req);
|
||||
}
|
||||
|
||||
static void nvmet_file_execute_rw_buffered_io(struct nvmet_req *req)
|
||||
{
|
||||
INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
|
||||
queue_work(buffered_io_wq, &req->f.work);
|
||||
}
|
||||
|
||||
static void nvmet_file_flush_work(struct work_struct *w)
|
||||
{
|
||||
struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
|
||||
@ -280,7 +298,10 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
|
||||
switch (cmd->common.opcode) {
|
||||
case nvme_cmd_read:
|
||||
case nvme_cmd_write:
|
||||
req->execute = nvmet_file_execute_rw;
|
||||
if (req->ns->buffered_io)
|
||||
req->execute = nvmet_file_execute_rw_buffered_io;
|
||||
else
|
||||
req->execute = nvmet_file_execute_rw;
|
||||
req->data_len = nvmet_rw_len(req);
|
||||
return 0;
|
||||
case nvme_cmd_flush:
|
||||
|
@ -65,6 +65,7 @@ struct nvmet_ns {
|
||||
u8 nguid[16];
|
||||
uuid_t uuid;
|
||||
|
||||
bool buffered_io;
|
||||
bool enabled;
|
||||
struct nvmet_subsys *subsys;
|
||||
const char *device_path;
|
||||
@ -269,6 +270,8 @@ struct nvmet_req {
|
||||
const struct nvmet_fabrics_ops *ops;
|
||||
};
|
||||
|
||||
extern struct workqueue_struct *buffered_io_wq;
|
||||
|
||||
static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
|
||||
{
|
||||
req->rsp->status = cpu_to_le16(status << 1);
|
||||
|
Loading…
Reference in New Issue
Block a user