nvme-fabrics: Convert nvmf_transports_mutex to an rwsem

The mutex protects against the list of transports changing while a
controller is being created, but using a plain old mutex means that it
also serializes controller creation.  This unnecessarily slows down
the creation of multiple controllers: for the RDMA transport, for
example, creating a controller involves establishing one connection
per I/O queue, each of which requires further network and software
round trips, so the delay can become significant.

The simplest way to fix this is to change the mutex to an rwsem and only
hold it for writing when the list is being mutated.  Since we can take
the rwsem for reading while creating a controller, we can create multiple
controllers in parallel.
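
To make the locking pattern explicit, here is a minimal sketch of the
rwsem usage (not the driver code itself; the sketch_* helpers are made
up for illustration, and in the real nvmf_create_ctrl() path the read
lock is held across the ->create_ctrl() call rather than dropped
inside the lookup):

#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/string.h>

#include "fabrics.h"	/* struct nvmf_transport_ops */

static LIST_HEAD(nvmf_transports);
static DECLARE_RWSEM(nvmf_transports_rwsem);

/* Writer side: registration mutates the list, so it takes the rwsem
 * exclusively and still serializes against all other users. */
static void sketch_register(struct nvmf_transport_ops *ops)
{
	down_write(&nvmf_transports_rwsem);
	list_add_tail(&ops->entry, &nvmf_transports);
	up_write(&nvmf_transports_rwsem);
}

/* Reader side: controller creation only walks the list, so it takes
 * the rwsem shared; several creators can hold it concurrently. */
static struct nvmf_transport_ops *sketch_lookup(const char *name)
{
	struct nvmf_transport_ops *ops, *found = NULL;

	down_read(&nvmf_transports_rwsem);
	list_for_each_entry(ops, &nvmf_transports, entry) {
		if (strcmp(ops->name, name) == 0) {
			found = ops;
			break;
		}
	}
	up_read(&nvmf_transports_rwsem);
	return found;
}

In the patch below this maps to down_write()/up_write() in
nvmf_register_transport() and nvmf_unregister_transport(), and to
down_read()/up_read() around the transport lookup and ->create_ctrl()
call in nvmf_create_ctrl().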

Signed-off-by: Roland Dreier <roland@purestorage.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
commit 489beb91e6 (parent 2b76da9563)
Author:    Roland Dreier <roland@purestorage.com>
Date:      2017-08-29 10:33:44 -07:00
Committer: Christoph Hellwig <hch@lst.de>

drivers/nvme/host/fabrics.c

@@ -22,7 +22,7 @@
 #include "fabrics.h"
 
 static LIST_HEAD(nvmf_transports);
-static DEFINE_MUTEX(nvmf_transports_mutex);
+static DECLARE_RWSEM(nvmf_transports_rwsem);
 
 static LIST_HEAD(nvmf_hosts);
 static DEFINE_MUTEX(nvmf_hosts_mutex);
@@ -495,9 +495,9 @@ int nvmf_register_transport(struct nvmf_transport_ops *ops)
 	if (!ops->create_ctrl)
 		return -EINVAL;
 
-	mutex_lock(&nvmf_transports_mutex);
+	down_write(&nvmf_transports_rwsem);
 	list_add_tail(&ops->entry, &nvmf_transports);
-	mutex_unlock(&nvmf_transports_mutex);
+	up_write(&nvmf_transports_rwsem);
 
 	return 0;
 }
@@ -514,9 +514,9 @@ EXPORT_SYMBOL_GPL(nvmf_register_transport);
  */
 void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
 {
-	mutex_lock(&nvmf_transports_mutex);
+	down_write(&nvmf_transports_rwsem);
 	list_del(&ops->entry);
-	mutex_unlock(&nvmf_transports_mutex);
+	up_write(&nvmf_transports_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvmf_unregister_transport);
 
@@ -525,7 +525,7 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
 {
 	struct nvmf_transport_ops *ops;
 
-	lockdep_assert_held(&nvmf_transports_mutex);
+	lockdep_assert_held(&nvmf_transports_rwsem);
 
 	list_for_each_entry(ops, &nvmf_transports, entry) {
 		if (strcmp(ops->name, opts->transport) == 0)
@@ -851,7 +851,7 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
 		goto out_free_opts;
 	opts->mask &= ~NVMF_REQUIRED_OPTS;
 
-	mutex_lock(&nvmf_transports_mutex);
+	down_read(&nvmf_transports_rwsem);
 	ops = nvmf_lookup_transport(opts);
 	if (!ops) {
 		pr_info("no handler found for transport %s.\n",
@@ -878,16 +878,16 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
 		dev_warn(ctrl->device,
 			"controller returned incorrect NQN: \"%s\".\n",
 			ctrl->subnqn);
-		mutex_unlock(&nvmf_transports_mutex);
+		up_read(&nvmf_transports_rwsem);
 		ctrl->ops->delete_ctrl(ctrl);
 		return ERR_PTR(-EINVAL);
 	}
 
-	mutex_unlock(&nvmf_transports_mutex);
+	up_read(&nvmf_transports_rwsem);
 	return ctrl;
 
 out_unlock:
-	mutex_unlock(&nvmf_transports_mutex);
+	up_read(&nvmf_transports_rwsem);
 out_free_opts:
 	nvmf_free_options(opts);
 	return ERR_PTR(ret);
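
For context, transports register and unregister through the functions
changed above; both paths are writers on the new rwsem, while
nvmf_create_ctrl() is a reader, so controller creation can proceed in
parallel with other creations.  A hypothetical, stripped-down transport
module might look like this (the "demo" names are invented for
illustration and the create_ctrl() stub does no real work):

#include <linux/module.h>
#include <linux/err.h>

#include "fabrics.h"

/* Stub ->create_ctrl(); a real transport would set up its admin and
 * I/O queues here, running under the rwsem held for reading. */
static struct nvme_ctrl *demo_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	return ERR_PTR(-ENODEV);
}

static struct nvmf_transport_ops demo_transport = {
	.name		= "demo",
	.create_ctrl	= demo_create_ctrl,
};

static int __init demo_init(void)
{
	/* Takes nvmf_transports_rwsem for writing while linking in. */
	return nvmf_register_transport(&demo_transport);
}

static void __exit demo_exit(void)
{
	/* Also a writer; waits for in-flight readers to drop the rwsem. */
	nvmf_unregister_transport(&demo_transport);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note that the lockdep_assert_held() in nvmf_lookup_transport() keeps
working after the conversion: it only checks that the lock is held,
whether for reading or for writing.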