nvmet-fc: use RCU protection for assoc_list

Searching assoc_list is now protected by rcu_read_lock(), per the RCU
list rules; the list is not modified from within the traversal itself.
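
For reference, the read-side list pattern being adopted looks roughly
like the sketch below; struct foo, foo_list, and the id field are
hypothetical stand-ins, not names from this driver:

    /* Hypothetical RCU reader: walk the list without the writer lock. */
    struct foo {
            struct list_head entry;
            u64 id;
    };

    static bool foo_exists(struct list_head *foo_list, u64 id)
    {
            struct foo *f;
            bool found = false;

            rcu_read_lock();
            list_for_each_entry_rcu(f, foo_list, entry) {
                    if (f->id == id) {
                            found = true;
                            break;
                    }
            }
            rcu_read_unlock();
            return found;
    }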

The queue array embedded in nvmet_fc_tgt_assoc is protected by
rcu_read_lock() according to the RCU dereference/assign rules.
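
The dereference/assign pairing on an RCU-managed pointer slot, in
sketch form (bar, slots[], MAX_SLOTS, and use_bar() are hypothetical):

    struct bar __rcu *slots[MAX_SLOTS];

    /* Writer, serialized by the update-side lock: publish a fully
     * initialized object, or clear the slot. rcu_assign_pointer()
     * orders the object's initialization before the pointer store. */
    rcu_assign_pointer(slots[i], new_bar);
    rcu_assign_pointer(slots[i], NULL);

    /* Reader, inside rcu_read_lock()/rcu_read_unlock(): */
    struct bar *b = rcu_dereference(slots[i]);
    if (b)
            use_bar(b);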

Queue and assoc objects are freed after a grace period via kfree_rcu().
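
kfree_rcu() defers the actual kfree() until a grace period has elapsed,
so readers that picked up the pointer under rcu_read_lock() can never
touch freed memory. A minimal sketch with a hypothetical struct:

    struct baz {
            u64 id;
            struct rcu_head rcu;    /* used by kfree_rcu() to queue the free */
    };

    static void baz_free(struct baz *b)
    {
            /* b must already be unpublished (list_del_rcu() or
             * rcu_assign_pointer(slot, NULL)) before this point. */
            kfree_rcu(b, rcu);
    }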

The tgtport lock is still taken when modifying assoc_list.
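
That is, only readers go lockless; updates stay serialized. A
hypothetical writer sketch of the add/remove paths:

    spin_lock_irqsave(&writer_lock, flags);
    list_add_tail_rcu(&f->entry, &foo_list);        /* publish */
    spin_unlock_irqrestore(&writer_lock, flags);

    spin_lock_irqsave(&writer_lock, flags);
    list_del_rcu(&f->entry);        /* unpublish; readers may still see f */
    spin_unlock_irqrestore(&writer_lock, flags);
    kfree_rcu(f, rcu);              /* reclaim after the grace period */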

Reviewed-by: Eldad Zinger <Eldad.Zinger@dell.com>
Reviewed-by: Elad Grupi <Elad.Grupi@dell.com>
Reviewed-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Leonid Ravich <Leonid.Ravich@emc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
commit 4e2f02bf77
parent 36ca03c830
Leonid Ravich, 2021-01-03 20:12:54 +02:00 (committed by Christoph Hellwig)

@@ -145,6 +145,7 @@ struct nvmet_fc_tgt_queue {
 	struct list_head avail_defer_list;
 	struct workqueue_struct *work_q;
 	struct kref ref;
+	struct rcu_head rcu;
 	struct nvmet_fc_fcp_iod fod[];	/* array of fcp_iods */
 } __aligned(sizeof(unsigned long long));
@@ -167,6 +168,7 @@ struct nvmet_fc_tgt_assoc {
 	struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
 	struct kref ref;
 	struct work_struct del_work;
+	struct rcu_head rcu;
 };
@@ -790,7 +792,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
 			u16 qid, u16 sqsize)
 {
 	struct nvmet_fc_tgt_queue *queue;
-	unsigned long flags;
 	int ret;
 
 	if (qid > NVMET_NR_QUEUES)
@@ -829,9 +830,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
 		goto out_fail_iodlist;
 
 	WARN_ON(assoc->queues[qid]);
-	spin_lock_irqsave(&assoc->tgtport->lock, flags);
-	assoc->queues[qid] = queue;
-	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
+	rcu_assign_pointer(assoc->queues[qid], queue);
 
 	return queue;
@@ -851,11 +850,8 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
 {
 	struct nvmet_fc_tgt_queue *queue =
 		container_of(ref, struct nvmet_fc_tgt_queue, ref);
-	unsigned long flags;
 
-	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
-	queue->assoc->queues[queue->qid] = NULL;
-	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
+	rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
 
 	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
@@ -863,7 +859,7 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
 
 	destroy_workqueue(queue->work_q);
 
-	kfree(queue);
+	kfree_rcu(queue, rcu);
 }
 
 static void
@@ -965,24 +961,23 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
 	struct nvmet_fc_tgt_queue *queue;
 	u64 association_id = nvmet_fc_getassociationid(connection_id);
 	u16 qid = nvmet_fc_getqueueid(connection_id);
-	unsigned long flags;
 
 	if (qid > NVMET_NR_QUEUES)
 		return NULL;
 
-	spin_lock_irqsave(&tgtport->lock, flags);
-	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
 		if (association_id == assoc->association_id) {
-			queue = assoc->queues[qid];
+			queue = rcu_dereference(assoc->queues[qid]);
 			if (queue &&
 			    (!atomic_read(&queue->connected) ||
 			     !nvmet_fc_tgt_q_get(queue)))
 				queue = NULL;
-			spin_unlock_irqrestore(&tgtport->lock, flags);
+			rcu_read_unlock();
 			return queue;
 		}
 	}
-	spin_unlock_irqrestore(&tgtport->lock, flags);
+	rcu_read_unlock();
 	return NULL;
 }
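
Worth noting in the hunk above: the queue pointer is only guaranteed to
stay valid inside the read-side critical section, so a counted
reference (nvmet_fc_tgt_q_get()) is taken before rcu_read_unlock(),
which makes returning the pointer safe. The generic shape of that
pattern, with hypothetical names:

    rcu_read_lock();
    obj = rcu_dereference(slot);
    if (obj && !kref_get_unless_zero(&obj->ref))
            obj = NULL;     /* already being torn down */
    rcu_read_unlock();
    return obj;             /* caller drops the ref via kref_put() */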
@@ -1137,7 +1132,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 		}
 		if (!needrandom) {
 			assoc->association_id = ran;
-			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
+			list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
 		}
 		spin_unlock_irqrestore(&tgtport->lock, flags);
 	}
@@ -1167,7 +1162,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
 	nvmet_fc_free_hostport(assoc->hostport);
 	spin_lock_irqsave(&tgtport->lock, flags);
-	list_del(&assoc->a_list);
+	list_del_rcu(&assoc->a_list);
 	oldls = assoc->rcv_disconn;
 	spin_unlock_irqrestore(&tgtport->lock, flags);
 	/* if pending Rcv Disconnect Association LS, send rsp now */
@@ -1177,7 +1172,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
 	dev_info(tgtport->dev,
 		"{%d:%d} Association freed\n",
 		tgtport->fc_target_port.port_num, assoc->a_id);
-	kfree(assoc);
+	kfree_rcu(assoc, rcu);
 	nvmet_fc_tgtport_put(tgtport);
 }
@@ -1198,7 +1193,6 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
 {
 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
 	struct nvmet_fc_tgt_queue *queue;
-	unsigned long flags;
 	int i, terminating;
 
 	terminating = atomic_xchg(&assoc->terminating, 1);
@@ -1207,19 +1201,23 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
 	if (terminating)
 		return;
 
-	spin_lock_irqsave(&tgtport->lock, flags);
 	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
-		queue = assoc->queues[i];
-		if (queue) {
-			if (!nvmet_fc_tgt_q_get(queue))
-				continue;
-			spin_unlock_irqrestore(&tgtport->lock, flags);
-			nvmet_fc_delete_target_queue(queue);
-			nvmet_fc_tgt_q_put(queue);
-			spin_lock_irqsave(&tgtport->lock, flags);
+		rcu_read_lock();
+		queue = rcu_dereference(assoc->queues[i]);
+		if (!queue) {
+			rcu_read_unlock();
+			continue;
 		}
+
+		if (!nvmet_fc_tgt_q_get(queue)) {
+			rcu_read_unlock();
+			continue;
+		}
+		rcu_read_unlock();
+		nvmet_fc_delete_target_queue(queue);
+		nvmet_fc_tgt_q_put(queue);
 	}
-	spin_unlock_irqrestore(&tgtport->lock, flags);
 
 	dev_info(tgtport->dev,
 		"{%d:%d} Association deleted\n",
@@ -1234,10 +1232,9 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
 {
 	struct nvmet_fc_tgt_assoc *assoc;
 	struct nvmet_fc_tgt_assoc *ret = NULL;
-	unsigned long flags;
 
-	spin_lock_irqsave(&tgtport->lock, flags);
-	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
 		if (association_id == assoc->association_id) {
 			ret = assoc;
 			if (!nvmet_fc_tgt_a_get(assoc))
@@ -1245,7 +1242,7 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&tgtport->lock, flags);
+	rcu_read_unlock();
 	return ret;
 }
@@ -1473,19 +1470,17 @@ nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
 static void
 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
 {
-	struct nvmet_fc_tgt_assoc *assoc, *next;
-	unsigned long flags;
+	struct nvmet_fc_tgt_assoc *assoc;
 
-	spin_lock_irqsave(&tgtport->lock, flags);
-	list_for_each_entry_safe(assoc, next,
-				&tgtport->assoc_list, a_list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
 		if (!nvmet_fc_tgt_a_get(assoc))
 			continue;
 		if (!schedule_work(&assoc->del_work))
 			/* already deleting - release local reference */
 			nvmet_fc_tgt_a_put(assoc);
 	}
-	spin_unlock_irqrestore(&tgtport->lock, flags);
+	rcu_read_unlock();
 }
 
 /**
@@ -1568,16 +1563,16 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 			continue;
 		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
 
-		spin_lock_irqsave(&tgtport->lock, flags);
-		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
-			queue = assoc->queues[0];
+		rcu_read_lock();
+		list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+			queue = rcu_dereference(assoc->queues[0]);
 			if (queue && queue->nvme_sq.ctrl == ctrl) {
 				if (nvmet_fc_tgt_a_get(assoc))
 					found_ctrl = true;
 				break;
 			}
 		}
-		spin_unlock_irqrestore(&tgtport->lock, flags);
+		rcu_read_unlock();
 
 		nvmet_fc_tgtport_put(tgtport);