dmaengine: idxd: remove interrupt flag for completion list spinlock
The list lock is never acquired in interrupt context. Therefore there is
no need to disable interrupts. Remove interrupt flags for lock operations.

Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/162826417450.3454650.3733188117742416238.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
commit 9fce3b3a0a
parent 15cb0321a5
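The change applies a standard kernel locking rule: the _irqsave/_irqrestore variants are only needed when a lock can also be taken from hard interrupt context; if every acquirer runs in process (including threaded-IRQ) context, a plain spin_lock()/spin_unlock() is enough and the flags bookkeeping can go. A minimal, self-contained sketch of that before/after pattern follows; the demo_irq_entry structure and demo_* functions are hypothetical illustrations, not the idxd driver's code.

/* Illustrative sketch only -- not taken from drivers/dma/idxd. */
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_irq_entry {
	spinlock_t list_lock;		/* only ever taken in thread context */
	struct list_head work_list;
};

static void demo_init(struct demo_irq_entry *e)
{
	spin_lock_init(&e->list_lock);
	INIT_LIST_HEAD(&e->work_list);
}

/*
 * Before: interrupt state is saved and restored even though no hardirq
 * path ever takes list_lock, so the flags handling is pure overhead.
 */
static void demo_add_irqsave(struct demo_irq_entry *e, struct list_head *item)
{
	unsigned long flags;

	spin_lock_irqsave(&e->list_lock, flags);
	list_add_tail(item, &e->work_list);
	spin_unlock_irqrestore(&e->list_lock, flags);
}

/*
 * After: a plain spin_lock() suffices because every contender runs in
 * process/threaded-IRQ context, so there is no hardirq reentrancy on the
 * same CPU to guard against.
 */
static void demo_add(struct demo_irq_entry *e, struct list_head *item)
{
	spin_lock(&e->list_lock);
	list_add_tail(item, &e->work_list);
	spin_unlock(&e->list_lock);
}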
drivers/dma/idxd/irq.c

@@ -176,7 +176,6 @@ static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
 {
 	struct idxd_desc *desc, *t;
 	struct llist_node *head;
-	unsigned long flags;
 
 	head = llist_del_all(&irq_entry->pending_llist);
 	if (!head)
@@ -197,17 +196,16 @@ static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
 
 			complete_desc(desc, IDXD_COMPLETE_NORMAL);
 		} else {
-			spin_lock_irqsave(&irq_entry->list_lock, flags);
+			spin_lock(&irq_entry->list_lock);
 			list_add_tail(&desc->list,
 				      &irq_entry->work_list);
-			spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+			spin_unlock(&irq_entry->list_lock);
 		}
 	}
 }
 
 static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
 {
-	unsigned long flags;
 	LIST_HEAD(flist);
 	struct idxd_desc *desc, *n;
 
@@ -215,9 +213,9 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
 	 * This lock protects list corruption from access of list outside of the irq handler
 	 * thread.
 	 */
-	spin_lock_irqsave(&irq_entry->list_lock, flags);
+	spin_lock(&irq_entry->list_lock);
 	if (list_empty(&irq_entry->work_list)) {
-		spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+		spin_unlock(&irq_entry->list_lock);
 		return;
 	}
 
@@ -228,7 +226,7 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
 		}
 	}
 
-	spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+	spin_unlock(&irq_entry->list_lock);
 
 	list_for_each_entry(desc, &flist, list) {
 		/*
drivers/dma/idxd/submit.c

@@ -106,14 +106,13 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
 {
 	struct idxd_desc *d, *t, *found = NULL;
 	struct llist_node *head;
-	unsigned long flags;
 
 	desc->completion->status = IDXD_COMP_DESC_ABORT;
 	/*
 	 * Grab the list lock so it will block the irq thread handler. This allows the
 	 * abort code to locate the descriptor need to be aborted.
 	 */
-	spin_lock_irqsave(&ie->list_lock, flags);
+	spin_lock(&ie->list_lock);
 	head = llist_del_all(&ie->pending_llist);
 	if (head) {
 		llist_for_each_entry_safe(d, t, head, llnode) {
@@ -127,7 +126,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
 
 	if (!found)
 		found = list_abort_desc(wq, ie, desc);
-	spin_unlock_irqrestore(&ie->list_lock, flags);
+	spin_unlock(&ie->list_lock);
 
 	if (found)
 		complete_desc(found, IDXD_COMPLETE_ABORT);
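For contrast, a hedged sketch of the case the commit message rules out: if a lock is also taken from a hardirq handler, the process-context side must keep the _irqsave variant, otherwise the handler can preempt the critical section on the same CPU and deadlock on the held lock. All names here (shared_lock, demo_hardirq, demo_read_count) are hypothetical and not part of the idxd driver; registration via request_irq() is omitted.

/* Illustrative counter-example only. */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(shared_lock);	/* hypothetical lock shared with a hardirq */
static unsigned long event_count;

static irqreturn_t demo_hardirq(int irq, void *data)
{
	/* Hardirq context: interrupts are already disabled on this CPU. */
	spin_lock(&shared_lock);
	event_count++;
	spin_unlock(&shared_lock);
	return IRQ_HANDLED;
}

static unsigned long demo_read_count(void)
{
	unsigned long flags, val;

	/*
	 * Process context: the hardirq above must be blocked while the
	 * lock is held, so the _irqsave variant is mandatory here.
	 */
	spin_lock_irqsave(&shared_lock, flags);
	val = event_count;
	spin_unlock_irqrestore(&shared_lock, flags);
	return val;
}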