forked from Minki/linux
Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
  async_tx: avoid the async xor_zero_sum path when src_cnt > device->max_xor
  fsldma: Fix the DMA halt when using DMA_INTERRUPT async_tx transfer.
commit 264e3e889d
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
@@ -271,7 +271,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 
 	BUG_ON(src_cnt <= 1);
 
-	if (device) {
+	if (device && src_cnt <= device->max_xor) {
 		dma_addr_t *dma_src = (dma_addr_t *) src_list;
 		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 		int i;
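This one-line guard is the whole async_tx fix: the zero-sum offload must respect the engine's advertised source limit, otherwise a request with src_cnt > device->max_xor would be handed to hardware that cannot XOR that many sources in one descriptor. When the check fails, the function falls through to the synchronous CPU path, just as it does when no DMA device is present. Below is a minimal user-space sketch of the same dispatch decision; struct dma_device_model and can_offload_zero_sum are hypothetical stand-ins, not the async_tx API.

#include <stdio.h>

/* Hypothetical stand-in for struct dma_device; only the source
 * limit matters for this decision. */
struct dma_device_model {
    int max_xor;    /* max sources one hardware XOR descriptor accepts */
};

/* Returns 1 when the zero-sum can be offloaded, 0 when the caller
 * must fall back to the synchronous (CPU) path. */
static int can_offload_zero_sum(const struct dma_device_model *device,
                                int src_cnt)
{
    return device != NULL && src_cnt <= device->max_xor;
}

int main(void)
{
    struct dma_device_model dev = { .max_xor = 8 };

    printf("src_cnt=4  -> offload=%d\n", can_offload_zero_sum(&dev, 4));
    printf("src_cnt=16 -> offload=%d\n", can_offload_zero_sum(&dev, 16));
    printf("no device  -> offload=%d\n", can_offload_zero_sum(NULL, 4));
    return 0;
}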
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
@@ -123,6 +123,11 @@ static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
 	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
 }
 
+static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
+{
+	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
+}
+
 static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
 {
 	u32 sr = get_sr(fsl_chan);
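get_bcr() follows the same accessor pattern as get_ndar() above: DMA_IN() performs the endian-correct MMIO read of a channel register, here the 32-bit byte count register (BCR). A user-space sketch of the pattern over a mocked register block; everything suffixed _model is hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Mocked register block; the real driver goes through DMA_IN().
 * Field names follow the diff (ndar, bcr). */
struct fsl_dma_regs_model {
    uint64_t ndar;  /* next descriptor address register */
    uint32_t bcr;   /* byte count register */
};

static uint32_t get_bcr_model(const struct fsl_dma_regs_model *regs)
{
    return regs->bcr;
}

int main(void)
{
    struct fsl_dma_regs_model regs = { .ndar = 0x1000, .bcr = 0 };

    /* bcr == 0 is how the interrupt handler further below spots a
     * DMA_INTERRUPT (NULL) transfer. */
    printf("bcr=%u -> %s\n", (unsigned)get_bcr_model(&regs),
           get_bcr_model(&regs) == 0 ? "NULL transfer" : "data transfer");
    return 0;
}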
@@ -426,6 +431,9 @@ fsl_dma_prep_interrupt(struct dma_chan *chan)
 	new->async_tx.cookie = -EBUSY;
 	new->async_tx.ack = 0;
 
+	/* Insert the link descriptor into the LD ring */
+	list_add_tail(&new->node, &new->async_tx.tx_list);
+
 	/* Set End-of-link on the last link descriptor of the new list */
 	set_ld_eol(fsl_chan, new);
 
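This is the halt fix proper: the DMA_INTERRUPT descriptor was prepared but never placed on the transaction's descriptor list, so it was never queued to the channel and the transfer chain could halt. The added list_add_tail() queues it before set_ld_eol() marks the list tail. A toy model of the intrusive-list insertion follows; list_add_tail_model mimics the kernel's list_add_tail, and all names here are hypothetical.

#include <stdio.h>

/* Minimal circular doubly-linked list, enough to show why the
 * descriptor must be queued before the tail can be EOL-marked. */
struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail_model(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev;
    n->next = h;
    h->prev->next = n;
    h->prev = n;
}

struct desc_model {
    struct list_head node;
    int eol;    /* End-of-link flag, set_ld_eol() equivalent */
};

int main(void)
{
    struct list_head tx_list;
    struct desc_model d = { .eol = 0 };

    list_init(&tx_list);
    list_add_tail_model(&d.node, &tx_list); /* the previously missing step */
    d.eol = 1;                              /* now marks a queued tail */

    printf("queued=%d eol=%d\n", tx_list.next == &d.node, d.eol);
    return 0;
}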
@@ -701,6 +709,23 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 	if (stat & FSL_DMA_SR_TE)
 		dev_err(fsl_chan->dev, "Transfer Error!\n");
 
+	/* Programming Error
+	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
+	 * trigger a PE interrupt.
+	 */
+	if (stat & FSL_DMA_SR_PE) {
+		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
+		if (get_bcr(fsl_chan) == 0) {
+			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
+			 * Now, update the completed cookie, and continue the
+			 * next uncompleted transfer.
+			 */
+			fsl_dma_update_completed_cookie(fsl_chan);
+			fsl_chan_xfer_ld_queue(fsl_chan);
+		}
+		stat &= ~FSL_DMA_SR_PE;
+	}
+
 	/* If the link descriptor segment transfer finishes,
 	 * we will recycle the used descriptor.
 	 */
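The handler now recognizes that a DMA_INTERRUPT async_tx is a NULL transfer and will raise a Programming Error interrupt: when BCR reads 0 the error is expected, so the completed cookie is updated and the next queued transfer is started instead of leaving the channel halted. A compact user-space model of that decision; handle_pe_model is hypothetical, the mask value mirrors FSL_DMA_SR_PE from fsldma.h.

#include <stdint.h>
#include <stdio.h>

#define SR_PE_MODEL 0x00000010u /* mirrors FSL_DMA_SR_PE */

/* Models the new branch: a Programming Error with BCR == 0 is the
 * expected side effect of a NULL transfer, so the channel is
 * restarted; either way the PE bit is acknowledged, not fatal. */
static int handle_pe_model(uint32_t *stat, uint32_t bcr)
{
    int restarted = 0;

    if (*stat & SR_PE_MODEL) {
        if (bcr == 0)
            restarted = 1;      /* update cookie, kick the LD queue */
        *stat &= ~SR_PE_MODEL;  /* clear the handled bit */
    }
    return restarted;
}

int main(void)
{
    uint32_t stat = SR_PE_MODEL;

    printf("restarted=%d stat=0x%08x\n", handle_pe_model(&stat, 0), stat);
    return 0;
}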
@@ -841,6 +866,11 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
 	tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
 	async_tx_ack(tx3);
 
+	/* Interrupt tx test */
+	tx1 = fsl_dma_prep_interrupt(chan);
+	async_tx_ack(tx1);
+	cookie = fsl_dma_tx_submit(tx1);
+
 	/* Test submitting the prepared descriptors out of order */
 	cookie = fsl_dma_tx_submit(tx3);
 	cookie = fsl_dma_tx_submit(tx2);
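The self test now also exercises fsl_dma_prep_interrupt() and deliberately submits the prepared descriptors out of order (tx1, then tx3, then tx2): completion cookies are assigned at submit time, so submission order, not preparation order, determines them. A toy illustration of that property; tx_submit_model is hypothetical.

#include <stdio.h>

/* Toy cookie allocator: cookies are handed out at submit time, so
 * tx3 gets a smaller cookie than tx2 even though it was prepared
 * later, matching the out-of-order submission in the self test. */
static int next_cookie = 1;

static int tx_submit_model(const char *name)
{
    int cookie = next_cookie++;

    printf("%s -> cookie %d\n", name, cookie);
    return cookie;
}

int main(void)
{
    tx_submit_model("tx1 (interrupt)");
    tx_submit_model("tx3");
    tx_submit_model("tx2");
    return 0;
}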
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
@@ -40,6 +40,7 @@
 #define FSL_DMA_MR_EOTIE	0x00000080
 
 #define FSL_DMA_SR_CH		0x00000020
+#define FSL_DMA_SR_PE		0x00000010
 #define FSL_DMA_SR_CB		0x00000004
 #define FSL_DMA_SR_TE		0x00000080
 #define FSL_DMA_SR_EOSI		0x00000002
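FSL_DMA_SR_PE (0x00000010), the Programming Error status bit tested by the new interrupt-handler branch, is the only new definition; the surrounding bits are unchanged context. A self-contained snippet decoding a status word with these masks, in the style of the handler's checks:

#include <stdint.h>
#include <stdio.h>

/* Status register bits copied from the diff above. */
#define FSL_DMA_SR_TE 0x00000080u
#define FSL_DMA_SR_CH 0x00000020u
#define FSL_DMA_SR_PE 0x00000010u /* newly added Programming Error bit */
#define FSL_DMA_SR_CB 0x00000004u

int main(void)
{
    uint32_t stat = FSL_DMA_SR_PE | FSL_DMA_SR_CB;

    /* Test each mask independently; several bits can be set at once. */
    if (stat & FSL_DMA_SR_TE)
        printf("FSL_DMA_SR_TE set (Transfer Error)\n");
    if (stat & FSL_DMA_SR_PE)
        printf("FSL_DMA_SR_PE set (Programming Error, "
               "possibly a NULL transfer)\n");
    if (stat & FSL_DMA_SR_CB)
        printf("FSL_DMA_SR_CB set\n");
    return 0;
}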