forked from Minki/linux
Merge branch 'old_next' into next
commit 8194145dcc
@@ -304,6 +304,11 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
 
 	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
+		/* check first descriptors addr */
+		if (desc->txd.phys == llp)
+			return;
+
+		/* check first descriptors llp */
 		if (desc->lli.llp == llp)
 			/* This one is currently in progress */
 			return;
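For context on the added checks: the channel's LLP register holds the bus address of the link-list item the controller will fetch next, so a descriptor is still live either when LLP points at the descriptor itself (desc->txd.phys, not yet fetched) or at its successor (desc->lli.llp, currently being executed). A minimal sketch of that relationship, using simplified hypothetical types rather than the driver's real dw_desc/dw_lli structures:

	/* Simplified, hypothetical types -- not the dw_dmac definitions.
	 * Assumes kernel types from <linux/types.h>. */
	struct hw_lli {
		u32 sar, dar, llp, ctl;		/* llp: bus address of the next lli */
	};

	struct sw_desc {
		struct hw_lli lli;		/* block the controller fetches */
		dma_addr_t phys;		/* bus address of 'lli' (txd.phys) */
	};

	/*
	 * Stop the completion scan at the first descriptor the hardware has
	 * not finished: either it is still queued (LLP points at it) or it
	 * is the one being executed (LLP already points past it).
	 */
	static bool desc_still_active(const struct sw_desc *desc, u32 llp)
	{
		return desc->phys == llp || desc->lli.llp == llp;
	}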
@@ -1292,8 +1292,7 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_dma;
 
-	pm_runtime_set_active(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
 	pm_runtime_allow(&pdev->dev);
 	return 0;
 
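On the runtime-PM side: the PCI core already marks a newly added device active and enables runtime PM for it (while forbidding runtime suspend by default), so repeating pm_runtime_set_active()/pm_runtime_enable() in the driver buys nothing; probe only needs to drop a usage-count reference and lift the core's default forbid, which is what the surviving pm_runtime_put_noidle()/pm_runtime_allow() pair does. The @@ -1322 hunk below adds the symmetric pm_runtime_get_noresume()/pm_runtime_forbid() calls in remove. A hedged sketch of that pairing for a generic PCI driver (device setup, error handling and teardown omitted, names illustrative):

	/* Assumes <linux/pci.h> and <linux/pm_runtime.h>. */
	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		/* ... device setup ... */
		pm_runtime_put_noidle(&pdev->dev);	/* drop a usage-count reference */
		pm_runtime_allow(&pdev->dev);		/* undo the PCI core's default forbid */
		return 0;
	}

	static void example_remove(struct pci_dev *pdev)
	{
		pm_runtime_get_noresume(&pdev->dev);	/* keep the device awake for teardown */
		pm_runtime_forbid(&pdev->dev);		/* restore the core's default */
		/* ... device teardown ... */
	}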
@@ -1322,6 +1321,9 @@ err_enable_device:
 static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
 {
 	struct middma_device *device = pci_get_drvdata(pdev);
+
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_forbid(&pdev->dev);
 	middma_shutdown(pdev);
 	pci_dev_put(pdev);
 	kfree(device);
@@ -1385,13 +1387,20 @@ int dma_resume(struct pci_dev *pci)
 static int dma_runtime_suspend(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	return dma_suspend(pci_dev, PMSG_SUSPEND);
+	struct middma_device *device = pci_get_drvdata(pci_dev);
+
+	device->state = SUSPENDED;
+	return 0;
 }
 
 static int dma_runtime_resume(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	return dma_resume(pci_dev);
+	struct middma_device *device = pci_get_drvdata(pci_dev);
+
+	device->state = RUNNING;
+	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
+	return 0;
 }
 
 static int dma_runtime_idle(struct device *dev)
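The reworked runtime callbacks no longer funnel through the full system suspend/resume paths; they just record the driver state and, on resume, re-enable the controller. Callbacks of this kind are normally wired to the PCI driver through a struct dev_pm_ops; a hedged sketch of that wiring (id_table and other required fields omitted, names illustrative):

	/* Assumes <linux/pci.h> and <linux/pm.h>. */
	static const struct dev_pm_ops example_pm_ops = {
		.runtime_suspend = dma_runtime_suspend,
		.runtime_resume  = dma_runtime_resume,
		.runtime_idle    = dma_runtime_idle,
	};

	static struct pci_driver example_pci_driver = {
		.name   = "example_dma",
		.probe  = intel_mid_dma_probe,
		.remove = __devexit_p(intel_mid_dma_remove),
		.driver = {
			.pm = &example_pm_ops,
		},
	};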
@@ -619,7 +619,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 
 	if (unlikely(!len))
 		return NULL;
-	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
+	BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
 
 	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
 		__func__, len);
@@ -652,7 +652,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
 
 	if (unlikely(!len))
 		return NULL;
-	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
+	BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
 
 	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
 		__func__, len);
@@ -686,7 +686,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 
 	if (unlikely(!len))
 		return NULL;
-	BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 
 	dev_dbg(iop_chan->device->common.dev,
 		"%s src_cnt: %d len: %u flags: %lx\n",
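The BUG_ON() edits in the iop-adma hunks above and in the mv_xor and ppc440spe hunks that follow are all the same cleanup: the generic BUG_ON() already wraps its condition in unlikely(), so nesting another unlikely() inside the macro argument is redundant. A simplified sketch of the generic form from include/asm-generic/bug.h (architectures may provide their own):

	#ifndef HAVE_ARCH_BUG_ON
	#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
	#endif

The standalone "if (unlikely(!len))" and "if (unlikely(len < ...))" guards are left alone, since there the branch hint is not already implied.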
@@ -671,7 +671,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 		return NULL;
 
-	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 
 	spin_lock_bh(&mv_chan->lock);
 	slot_cnt = mv_chan_memcpy_slot_count(len);
@@ -710,7 +710,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 		return NULL;
 
-	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 
 	spin_lock_bh(&mv_chan->lock);
 	slot_cnt = mv_chan_memset_slot_count(len);
@@ -744,7 +744,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 		return NULL;
 
-	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 
 	dev_dbg(mv_chan->device->common.dev,
 		"%s src_cnt: %d len: dest %x %u flags: %ld\n",
@@ -2313,7 +2313,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
 	if (unlikely(!len))
 		return NULL;
 
-	BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+	BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
 
 	spin_lock_bh(&ppc440spe_chan->lock);
 
@@ -2354,7 +2354,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset(
 	if (unlikely(!len))
 		return NULL;
 
-	BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+	BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
 
 	spin_lock_bh(&ppc440spe_chan->lock);
 
@@ -2397,7 +2397,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
 			dma_dest, dma_src, src_cnt));
 	if (unlikely(!len))
 		return NULL;
-	BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
 
 	dev_dbg(ppc440spe_chan->device->common.dev,
 		"ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
@@ -2887,7 +2887,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
 	ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
 			dst, src, src_cnt));
 	BUG_ON(!len);
-	BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
 	BUG_ON(!src_cnt);
 
 	if (src_cnt == 1 && dst[1] == src[0]) {