dmaengine: at_xdmac: Remove a level of indentation in at_xdmac_tasklet()
Apart from making the code easier to read, this patch is a prerequisite for a functional change: tasklets run with interrupts enabled, so we need to protect atchan->irq_status with spin_lock_irq(), otherwise the tasklet can be interrupted by the IRQ that modifies irq_status. atchan->irq_status will be protected in a further patch.

Signed-off-by: Tudor Ambarus <tudor.ambarus@microchip.com>
Link: https://lore.kernel.org/r/20211215110115.191749-12-tudor.ambarus@microchip.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
commit a61210cae8
parent 912f7c6f7f
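Before the diff itself, a hedged illustration of the shape of the change: the patch switches at_xdmac_tasklet() to guard-clause style, handling the cyclic case and the nothing-pending case first and returning early, so the completion path no longer sits one level deeper inside an else-if branch. The snippet below is a generic, self-contained sketch of that transformation in plain C; handle_nested(), handle_flat(), struct chan and the bit names are invented for the example and are not part of the driver.

/* Generic illustration of the guard-clause refactor, not driver code. */
#include <stdio.h>

struct chan {
        int is_cyclic;
        unsigned int irq_status;
};

#define ERR_MASK  0x7u          /* made-up error bits */
#define DONE_BIT  0x8u          /* made-up "transfer done" bit */

/* Before: the common path is nested inside an else-if branch. */
static void handle_nested(struct chan *c)
{
        if (c->is_cyclic) {
                printf("cyclic\n");
        } else if (c->irq_status & (DONE_BIT | ERR_MASK)) {
                if (c->irq_status & ERR_MASK)
                        printf("error\n");
                printf("complete descriptor\n");
        }
}

/* After: guard clauses return early and the common path is unindented. */
static void handle_flat(struct chan *c)
{
        if (c->is_cyclic) {
                printf("cyclic\n");
                return;
        }

        if (!(c->irq_status & (DONE_BIT | ERR_MASK)))
                return;

        if (c->irq_status & ERR_MASK)
                printf("error\n");
        printf("complete descriptor\n");
}

int main(void)
{
        struct chan c = { .is_cyclic = 0, .irq_status = DONE_BIT };

        handle_nested(&c);
        handle_flat(&c);
        return 0;
}

Both functions behave identically for any input; only the nesting depth changes, which is the property the patch relies on before the locking change lands.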
@@ -1667,53 +1667,51 @@ static void at_xdmac_tasklet(struct tasklet_struct *t)
 {
        struct at_xdmac_chan    *atchan = from_tasklet(atchan, t, tasklet);
        struct at_xdmac_desc    *desc;
+       struct dma_async_tx_descriptor *txd;
        u32                     error_mask;

        dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
                __func__, atchan->irq_status);

-       error_mask = AT_XDMAC_CIS_RBEIS
-                    | AT_XDMAC_CIS_WBEIS
-                    | AT_XDMAC_CIS_ROIS;
+       if (at_xdmac_chan_is_cyclic(atchan))
+               return at_xdmac_handle_cyclic(atchan);

-       if (at_xdmac_chan_is_cyclic(atchan)) {
-               at_xdmac_handle_cyclic(atchan);
-       } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
-                  || (atchan->irq_status & error_mask)) {
-               struct dma_async_tx_descriptor  *txd;
+       error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS |
+                    AT_XDMAC_CIS_ROIS;

-               if (atchan->irq_status & error_mask)
-                       at_xdmac_handle_error(atchan);
+       if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
+           !(atchan->irq_status & error_mask))
+               return;

-               spin_lock_irq(&atchan->lock);
-               desc = list_first_entry(&atchan->xfers_list,
-                                       struct at_xdmac_desc,
-                                       xfer_node);
-               dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
-               if (!desc->active_xfer) {
-                       dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
-                       spin_unlock_irq(&atchan->lock);
-                       return;
-               }
+       if (atchan->irq_status & error_mask)
+               at_xdmac_handle_error(atchan);

-               txd = &desc->tx_dma_desc;
-               dma_cookie_complete(txd);
-               /* Remove the transfer from the transfer list. */
-               list_del(&desc->xfer_node);
-               spin_unlock_irq(&atchan->lock);
+       spin_lock_irq(&atchan->lock);
+       desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
+                               xfer_node);
+       dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+       if (!desc->active_xfer) {
+               dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
+               spin_unlock_irq(&atchan->lock);
+               return;
+       }

-               if (txd->flags & DMA_PREP_INTERRUPT)
-                       dmaengine_desc_get_callback_invoke(txd, NULL);
+       txd = &desc->tx_dma_desc;
+       dma_cookie_complete(txd);
+       /* Remove the transfer from the transfer list. */
+       list_del(&desc->xfer_node);
+       spin_unlock_irq(&atchan->lock);

-               dma_run_dependencies(txd);
+       if (txd->flags & DMA_PREP_INTERRUPT)
+               dmaengine_desc_get_callback_invoke(txd, NULL);

-               spin_lock_irq(&atchan->lock);
-               /* Move the xfer descriptors into the free descriptors list. */
-               list_splice_tail_init(&desc->descs_list,
-                                     &atchan->free_descs_list);
-               at_xdmac_advance_work(atchan);
-               spin_unlock_irq(&atchan->lock);
-       }
+       dma_run_dependencies(txd);
+
+       spin_lock_irq(&atchan->lock);
+       /* Move the xfer descriptors into the free descriptors list. */
+       list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
+       at_xdmac_advance_work(atchan);
+       spin_unlock_irq(&atchan->lock);
 }

 static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
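For context on the functional change this refactor prepares for: the hard-IRQ handler and the tasklet both touch atchan->irq_status, and because tasklets run with interrupts enabled, the tasklet side has to disable interrupts while holding the lock, or the IRQ handler can preempt it mid-update. Below is a minimal kernel-style sketch of that pairing; it uses invented names (struct my_chan, my_irq_handler(), my_tasklet()) and is an assumption about the general pattern, not the actual at_xdmac follow-up patch.

/* Hypothetical names; a sketch of the locking pairing, not at_xdmac code. */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_chan {
        spinlock_t              lock;
        u32                     irq_status;     /* written in hard IRQ, read in tasklet */
        struct tasklet_struct   tasklet;
};

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
        struct my_chan *chan = dev_id;
        u32 pending = 0xff;     /* would come from the hardware status register */

        /* Hard IRQ context: this interrupt line is not re-entered while the
         * handler runs, so a plain spin_lock() is sufficient here. */
        spin_lock(&chan->lock);
        chan->irq_status |= pending;
        spin_unlock(&chan->lock);

        tasklet_schedule(&chan->tasklet);
        return IRQ_HANDLED;
}

static void my_tasklet(struct tasklet_struct *t)
{
        struct my_chan *chan = from_tasklet(chan, t, tasklet);
        u32 status;

        /* Tasklets run with interrupts enabled: disable them while sampling
         * irq_status so the IRQ handler cannot race with this read/clear. */
        spin_lock_irq(&chan->lock);
        status = chan->irq_status;
        chan->irq_status = 0;
        spin_unlock_irq(&chan->lock);

        /* act on the snapshot in 'status' ... */
}

The asymmetry is deliberate: the handler can take plain spin_lock() because the same interrupt line does not re-enter while it runs, whereas the tasklet must take spin_lock_irq() to keep the handler from firing on its CPU while the lock is held.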