commit 5115f3c19d
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma

Pull slave-dmaengine updates from Vinod Koul:
 "This is fairly big pull by my standards as I had missed last merge
  window. So we have the support for device tree for slave-dmaengine,
  large updates to dw_dmac driver from Andy for reusing on different
  architectures. Along with this we have fixes on bunch of the drivers"

Fix up trivial conflicts, usually due to #include line movement next to
each other.

* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (111 commits)
  Revert "ARM: SPEAr13xx: Pass DW DMAC platform data from DT"
  ARM: dts: pl330: Add #dma-cells for generic dma binding support
  DMA: PL330: Register the DMA controller with the generic DMA helpers
  DMA: PL330: Add xlate function
  DMA: PL330: Add new pl330 filter for DT case.
  dma: tegra20-apb-dma: remove unnecessary assignment
  edma: do not waste memory for dma_mask
  dma: coh901318: set residue only if dma is in progress
  dma: coh901318: avoid unbalanced locking
  dmaengine.h: remove redundant else keyword
  dma: of-dma: protect list write operation by spin_lock
  dmaengine: ste_dma40: do not remove descriptors for cyclic transfers
  dma: of-dma.c: fix memory leakage
  dw_dmac: apply default dma_mask if needed
  dmaengine: ioat - fix spare sparse complain
  dmaengine: move drivers/of/dma.c -> drivers/dma/of-dma.c
  ioatdma: fix race between updating ioat->head and IOAT_COMPLETION_PENDING
  dw_dmac: add support for Lynxpoint DMA controllers
  dw_dmac: return proper residue value
  dw_dmac: fill individual length of descriptor
  ...
Files changed:

  Documentation/devicetree/bindings/dma/
  arch/arm/
  crypto/async_tx/
  drivers/dca/
  drivers/dma/ (Kconfig, Makefile, amba-pl08x.c, at_hdmac.c,
    at_hdmac_regs.h, coh901318.c, coh901318_lli.c, dmaengine.c,
    dmatest.c, dw_dmac.c, dw_dmac_regs.h, edma.c, ep93xx_dma.c, ioat/,
    iop-adma.c, ipu/, mmp_pdma.c, mv_xor.c, mxs-dma.c, of-dma.c,
    pch_dma.c, pl330.c, sh/, sirf-dma.c, ste_dma40.c, ste_dma40_ll.c,
    ste_dma40_ll.h, tegra20-apb-dma.c)
  drivers/misc/carma/
  drivers/mtd/nand/
  include/linux/
  net/ipv4/
@@ -10,7 +10,11 @@ Required properties:
 - interrupts: interrupt number to the cpu.
 
 Optional properties:
 - dma-coherent : Present if dma operations are coherent
+- #dma-cells: must be <1>. used to represent the number of integer
+  cells in the dmas property of client device.
+- dma-channels: contains the total number of DMA channels supported by the DMAC
+- dma-requests: contains the total number of DMA requests supported by the DMAC
 
 Example:
 
@@ -18,16 +22,23 @@ Example:
 	compatible = "arm,pl330", "arm,primecell";
 	reg = <0x12680000 0x1000>;
 	interrupts = <99>;
+	#dma-cells = <1>;
+	#dma-channels = <8>;
+	#dma-requests = <32>;
 };
 
 Client drivers (device nodes requiring dma transfers from dev-to-mem or
-mem-to-dev) should specify the DMA channel numbers using a two-value pair
+mem-to-dev) should specify the DMA channel numbers and dma channel names
 as shown below.
 
   [property name] = <[phandle of the dma controller] [dma request id]>;
+  [property name] = <[dma channel name]>
 
 where 'dma request id' is the dma request number which is connected
-to the client controller. The 'property name' is recommended to be
-of the form <name>-dma-channel.
+to the client controller. The 'property name' 'dmas' and 'dma-names'
+as required by the generic dma device tree binding helpers. The dma
+names correspond 1:1 with the dma request ids in the dmas property.
 
-Example:  tx-dma-channel = <&pdma0 12>;
+Example:  dmas = <&pdma0 12
+		  &pdma1 11>;
+	  dma-names = "tx", "rx";
Documentation/devicetree/bindings/dma/dma.txt (new file, 81 lines)

@@ -0,0 +1,81 @@
+* Generic DMA Controller and DMA request bindings
+
+Generic binding to provide a way for a driver using DMA Engine to retrieve the
+DMA request or channel information that goes from a hardware device to a DMA
+controller.
+
+
+* DMA controller
+
+Required property:
+- #dma-cells:		Must be at least 1. Used to provide DMA controller
+			specific information. See DMA client binding below for
+			more details.
+
+Optional properties:
+- dma-channels:	Number of DMA channels supported by the controller.
+- dma-requests:	Number of DMA requests signals supported by the
+			controller.
+
+Example:
+
+	dma: dma@48000000 {
+		compatible = "ti,omap-sdma";
+		reg = <0x48000000 0x1000>;
+		interrupts = <0 12 0x4
+			      0 13 0x4
+			      0 14 0x4
+			      0 15 0x4>;
+		#dma-cells = <1>;
+		dma-channels = <32>;
+		dma-requests = <127>;
+	};
+
+
+* DMA client
+
+Client drivers should specify the DMA property using a phandle to the controller
+followed by DMA controller specific data.
+
+Required property:
+- dmas:		List of one or more DMA specifiers, each consisting of
+			- A phandle pointing to DMA controller node
+			- A number of integer cells, as determined by the
+			  #dma-cells property in the node referenced by phandle
+			  containing DMA controller specific information. This
+			  typically contains a DMA request line number or a
+			  channel number, but can contain any data that is
+			  required for configuring a channel.
+- dma-names:	Contains one identifier string for each DMA specifier in
+			the dmas property. The specific strings that can be used
+			are defined in the binding of the DMA client device.
+			Multiple DMA specifiers can be used to represent
+			alternatives and in this case the dma-names for those
+			DMA specifiers must be identical (see examples).
+
+Examples:
+
+1. A device with one DMA read channel, one DMA write channel:
+
+	i2c1: i2c@1 {
+		...
+		dmas = <&dma 2		/* read channel */
+			&dma 3>;	/* write channel */
+		dma-names = "rx", "tx";
+		...
+	};
+
+2. A single read-write channel with three alternative DMA controllers:
+
+	dmas = <&dma1 5
+		&dma2 7
+		&dma3 2>;
+	dma-names = "rx-tx", "rx-tx", "rx-tx";
+
+3. A device with three channels, one of which has two alternatives:
+
+	dmas = <&dma1 2		/* read channel */
+		&dma1 3		/* write channel */
+		&dma2 0		/* error read */
+		&dma3 0>;	/* alternative error read */
+	dma-names = "rx", "tx", "error", "error";
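On the C side, a client driver would typically resolve the named channels from example 1 at probe time. A minimal sketch, assuming a hypothetical foo_probe() and using the dma_request_slave_channel() helper that this pull introduces in dmaengine.c (error handling abbreviated):

```c
#include <linux/dmaengine.h>
#include <linux/platform_device.h>

/*
 * Hypothetical probe fragment (not from the patch): look up the "rx"
 * and "tx" channels declared in the client's dmas/dma-names properties.
 * dma_request_slave_channel() walks the device's of_node and resolves
 * the named DMA specifier through the of-dma helpers.
 */
static int foo_probe(struct platform_device *pdev)
{
	struct dma_chan *rx, *tx;

	rx = dma_request_slave_channel(&pdev->dev, "rx");
	tx = dma_request_slave_channel(&pdev->dev, "tx");
	if (!rx || !tx)
		return -ENODEV;	/* no DT node, or no such channel name */

	/* ... dmaengine_slave_config(), prep, submit as usual ... */
	return 0;
}
```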
@@ -6,6 +6,26 @@ Required properties:
 - interrupt-parent: Should be the phandle for the interrupt controller
   that services interrupts for this device
 - interrupt: Should contain the DMAC interrupt number
+- nr_channels: Number of channels supported by hardware
+- is_private: The device channels should be marked as private and not for by the
+  general purpose DMA channel allocator. False if not passed.
+- chan_allocation_order: order of allocation of channel, 0 (default): ascending,
+  1: descending
+- chan_priority: priority of channels. 0 (default): increase from chan 0->n, 1:
+  increase from chan n->0
+- block_size: Maximum block size supported by the controller
+- nr_masters: Number of AHB masters supported by the controller
+- data_width: Maximum data width supported by hardware per AHB master
+  (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
+- slave_info:
+	- bus_id: name of this device channel, not just a device name since
+	  devices may have more than one channel e.g. "foo_tx". For using the
+	  dw_generic_filter(), slave drivers must pass exactly this string as
+	  param to filter function.
+	- cfg_hi: Platform-specific initializer for the CFG_HI register
+	- cfg_lo: Platform-specific initializer for the CFG_LO register
+	- src_master: src master for transfers on allocated channel.
+	- dst_master: dest master for transfers on allocated channel.
 
 Example:
 
@@ -14,4 +34,28 @@ Example:
 	reg = <0xfc000000 0x1000>;
 	interrupt-parent = <&vic1>;
 	interrupts = <12>;
+
+	nr_channels = <8>;
+	chan_allocation_order = <1>;
+	chan_priority = <1>;
+	block_size = <0xfff>;
+	nr_masters = <2>;
+	data_width = <3 3 0 0>;
+
+	slave_info {
+		uart0-tx {
+			bus_id = "uart0-tx";
+			cfg_hi = <0x4000>;	/* 0x8 << 11 */
+			cfg_lo = <0>;
+			src_master = <0>;
+			dst_master = <1>;
+		};
+		spi0-tx {
+			bus_id = "spi0-tx";
+			cfg_hi = <0x2000>;	/* 0x4 << 11 */
+			cfg_lo = <0>;
+			src_master = <0>;
+			dst_master = <0>;
+		};
+	};
 };
@@ -312,24 +312,36 @@
 		compatible = "arm,pl330", "arm,primecell";
 		reg = <0x121A0000 0x1000>;
 		interrupts = <0 34 0>;
+		#dma-cells = <1>;
+		#dma-channels = <8>;
+		#dma-requests = <32>;
 	};
 
 	pdma1: pdma@121B0000 {
 		compatible = "arm,pl330", "arm,primecell";
 		reg = <0x121B0000 0x1000>;
 		interrupts = <0 35 0>;
+		#dma-cells = <1>;
+		#dma-channels = <8>;
+		#dma-requests = <32>;
 	};
 
 	mdma0: mdma@10800000 {
 		compatible = "arm,pl330", "arm,primecell";
 		reg = <0x10800000 0x1000>;
 		interrupts = <0 33 0>;
+		#dma-cells = <1>;
+		#dma-channels = <8>;
+		#dma-requests = <1>;
 	};
 
 	mdma1: mdma@11C10000 {
 		compatible = "arm,pl330", "arm,primecell";
 		reg = <0x11C10000 0x1000>;
 		interrupts = <0 124 0>;
+		#dma-cells = <1>;
+		#dma-channels = <8>;
+		#dma-requests = <1>;
 	};
 };
@@ -23,13 +23,12 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/amba/pl080.h>
 
 #include <mach/dma.h>
 #include <mach/map.h>
 #include <mach/irqs.h>
 
-#include <asm/hardware/pl080.h>
-
 #include "regs-sys.h"
 
 /* dma channel state information */
@@ -16,7 +16,6 @@
 #include <linux/amba/pl022.h>
 #include <linux/amba/pl08x.h>
 #include <linux/io.h>
-#include <asm/hardware/pl080.h>
 #include <plat/pl080.h>
 #include <mach/generic.h>
 #include <mach/spear.h>
@@ -20,7 +20,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
-#include <asm/hardware/pl080.h>
+#include <linux/amba/pl080.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
 #include <asm/mach/map.h>
@@ -67,6 +67,12 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 
 		tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
 						    len, dma_prep_flags);
+		if (!tx) {
+			dma_unmap_page(device->dev, dma_dest, len,
+				       DMA_FROM_DEVICE);
+			dma_unmap_page(device->dev, dma_src, len,
+				       DMA_TO_DEVICE);
+		}
 	}
 
 	if (tx) {
@@ -25,6 +25,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/async_tx.h>
@@ -128,8 +128,8 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
 		}
 		device->device_issue_pending(chan);
 	} else {
-		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-			panic("%s: DMA_ERROR waiting for depend_tx\n",
+		if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS)
+			panic("%s: DMA error waiting for depend_tx\n",
 			      __func__);
 		tx->tx_submit(tx);
 	}
@@ -280,8 +280,9 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
 		 * we are referring to the correct operation
 		 */
 		BUG_ON(async_tx_test_ack(*tx));
-		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
-			panic("DMA_ERROR waiting for transaction\n");
+		if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS)
+			panic("%s: DMA error waiting for transaction\n",
+			      __func__);
 		async_tx_ack(*tx);
 		*tx = NULL;
 	}
@@ -230,9 +230,7 @@ EXPORT_SYMBOL_GPL(async_xor);
 
 static int page_is_zero(struct page *p, unsigned int offset, size_t len)
 {
-	char *a = page_address(p) + offset;
-	return ((*(u32 *) a) == 0 &&
-		memcmp(a, a + 4, len - 4) == 0);
+	return !memchr_inv(page_address(p) + offset, 0, len);
 }
 
 static inline struct dma_chan *
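memchr_inv(s, c, n) returns the address of the first byte of s that does not match c, or NULL when all n bytes match, so the rewritten page_is_zero() becomes a single full-length scan with no 4-byte special case. A minimal sketch of the idiom:

```c
#include <linux/string.h>
#include <linux/types.h>

/*
 * Illustrative only: memchr_inv() reduces an "is this buffer all
 * zeroes?" test to one library call, which is what the new
 * page_is_zero() above relies on.
 */
static bool all_zero(const void *buf, size_t len)
{
	return memchr_inv(buf, 0, len) == NULL;	/* true iff every byte is 0 */
}
```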
@@ -420,6 +420,11 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
 
 	raw_spin_lock_irqsave(&dca_lock, flags);
 
+	if (list_empty(&dca_domains)) {
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
+		return;
+	}
+
 	list_del(&dca->node);
 
 	pci_rc = dca_pci_rc_from_dev(dev);
@@ -51,7 +51,7 @@ config ASYNC_TX_ENABLE_CHANNEL_SWITCH
 
 config AMBA_PL08X
 	bool "ARM PrimeCell PL080 or PL081 support"
-	depends on ARM_AMBA && EXPERIMENTAL
+	depends on ARM_AMBA
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
@@ -83,7 +83,6 @@ config INTEL_IOP_ADMA
 
 config DW_DMAC
 	tristate "Synopsys DesignWare AHB DMA support"
-	depends on HAVE_CLK
 	select DMA_ENGINE
 	default y if CPU_AT32AP7000
 	help
@@ -215,8 +214,8 @@ config TIMB_DMA
 	  Enable support for the Timberdale FPGA DMA engine.
 
 config SIRF_DMA
-	tristate "CSR SiRFprimaII DMA support"
-	depends on ARCH_PRIMA2
+	tristate "CSR SiRFprimaII/SiRFmarco DMA support"
+	depends on ARCH_SIRF
 	select DMA_ENGINE
 	help
 	  Enable support for the CSR SiRFprimaII DMA engine.
@@ -328,6 +327,10 @@ config DMA_ENGINE
 config DMA_VIRTUAL_CHANNELS
 	tristate
 
+config DMA_OF
+	def_bool y
+	depends on OF
+
 comment "DMA Clients"
 	depends on DMA_ENGINE
@@ -3,6 +3,8 @@ ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
 
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
+obj-$(CONFIG_DMA_OF) += of-dma.o
+
 obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
 obj-$(CONFIG_DMATEST) += dmatest.o
@@ -83,7 +83,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
-#include <asm/hardware/pl080.h>
+#include <linux/amba/pl080.h>
 
 #include "dmaengine.h"
 #include "virt-dma.h"
@@ -1096,15 +1096,9 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
 				struct pl08x_dma_chan *plchan)
 {
 	LIST_HEAD(head);
-	struct pl08x_txd *txd;
 
 	vchan_get_all_descriptors(&plchan->vc, &head);
-
-	while (!list_empty(&head)) {
-		txd = list_first_entry(&head, struct pl08x_txd, vd.node);
-		list_del(&txd->vd.node);
-		pl08x_desc_free(&txd->vd);
-	}
+	vchan_dma_desc_free_list(&plchan->vc, &head);
 }
 
 /*
@@ -778,7 +778,7 @@ err:
  */
 static int
 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
-		size_t period_len, enum dma_transfer_direction direction)
+		size_t period_len)
 {
 	if (period_len > (ATC_BTSIZE_MAX << reg_width))
 		goto err_out;
@@ -786,8 +786,6 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
 		goto err_out;
 	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
 		goto err_out;
-	if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
-		goto err_out;
 
 	return 0;
@@ -886,14 +884,16 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 		return NULL;
 	}
 
+	if (unlikely(!is_slave_direction(direction)))
+		goto err_out;
+
 	if (sconfig->direction == DMA_MEM_TO_DEV)
 		reg_width = convert_buswidth(sconfig->dst_addr_width);
 	else
 		reg_width = convert_buswidth(sconfig->src_addr_width);
 
 	/* Check for too big/unaligned periods and unaligned DMA buffer */
-	if (atc_dma_cyclic_check_values(reg_width, buf_addr,
-					period_len, direction))
+	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
 		goto err_out;
 
 	/* build cyclic linked list */
@@ -369,10 +369,10 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
 
 static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
 {
-	dev_printk(KERN_CRIT, chan2dev(&atchan->chan_common),
+	dev_crit(chan2dev(&atchan->chan_common),
 		"  desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
 		lli->saddr, lli->daddr,
 		lli->ctrla, lli->ctrlb, lli->dscr);
 }
 
@@ -2355,7 +2355,9 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	enum dma_status ret;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	/* FIXME: should be conditional on ret != DMA_SUCCESS? */
+	if (ret == DMA_SUCCESS)
+		return ret;
+
 	dma_set_residue(txstate, coh901318_get_bytes_left(chan));
 
 	if (ret == DMA_IN_PROGRESS && cohc->stopped)
@@ -61,7 +61,7 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
 	dma_addr_t phy;
 
 	if (len == 0)
-		goto err;
+		return NULL;
 
 	spin_lock(&pool->lock);
@@ -62,6 +62,7 @@
 #include <linux/rculist.h>
 #include <linux/idr.h>
 #include <linux/slab.h>
+#include <linux/of_dma.h>
 
 static DEFINE_MUTEX(dma_list_mutex);
 static DEFINE_IDR(dma_idr);
@@ -266,7 +267,10 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 			pr_err("%s: timeout!\n", __func__);
 			return DMA_ERROR;
 		}
-	} while (status == DMA_IN_PROGRESS);
+		if (status != DMA_IN_PROGRESS)
+			break;
+		cpu_relax();
+	} while (1);
 
 	return status;
 }
@@ -546,6 +550,21 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 }
 EXPORT_SYMBOL_GPL(__dma_request_channel);
 
+/**
+ * dma_request_slave_channel - try to allocate an exclusive slave channel
+ * @dev:	pointer to client device structure
+ * @name:	slave channel name
+ */
+struct dma_chan *dma_request_slave_channel(struct device *dev, char *name)
+{
+	/* If device-tree is present get slave info from here */
+	if (dev->of_node)
+		return of_dma_request_slave_channel(dev->of_node, name);
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(dma_request_slave_channel);
+
 void dma_release_channel(struct dma_chan *chan)
 {
 	mutex_lock(&dma_list_mutex);
@@ -242,6 +242,13 @@ static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
 		dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
 }
 
+static unsigned int min_odd(unsigned int x, unsigned int y)
+{
+	unsigned int val = min(x, y);
+
+	return val % 2 ? val : val - 1;
+}
+
 /*
  * This function repeatedly tests DMA transfers of various lengths and
  * offsets for a given operation type until it is told to exit by
@@ -262,6 +269,7 @@ static int dmatest_func(void *data)
 	struct dmatest_thread	*thread = data;
 	struct dmatest_done	done = { .wait = &done_wait };
 	struct dma_chan		*chan;
+	struct dma_device	*dev;
 	const char		*thread_name;
 	unsigned int		src_off, dst_off, len;
 	unsigned int		error_count;
@@ -283,13 +291,16 @@ static int dmatest_func(void *data)
 
 	smp_rmb();
 	chan = thread->chan;
+	dev = chan->device;
 	if (thread->type == DMA_MEMCPY)
 		src_cnt = dst_cnt = 1;
 	else if (thread->type == DMA_XOR) {
-		src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
+		/* force odd to ensure dst = src */
+		src_cnt = min_odd(xor_sources | 1, dev->max_xor);
 		dst_cnt = 1;
 	} else if (thread->type == DMA_PQ) {
-		src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
+		/* force odd to ensure dst = src */
+		src_cnt = min_odd(pq_sources | 1, dma_maxpq(dev, 0));
 		dst_cnt = 2;
 		for (i = 0; i < src_cnt; i++)
 			pq_coefs[i] = 1;
@@ -327,7 +338,6 @@ static int dmatest_func(void *data)
 
 	while (!kthread_should_stop()
 	       && !(iterations && total_tests >= iterations)) {
-		struct dma_device *dev = chan->device;
 		struct dma_async_tx_descriptor *tx = NULL;
 		dma_addr_t dma_srcs[src_cnt];
 		dma_addr_t dma_dsts[dst_cnt];
@@ -526,7 +536,9 @@ err_srcs:
 			thread_name, total_tests, failed_tests, ret);
 
 	/* terminate all transfers on specified channels */
-	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+	if (ret)
+		dmaengine_terminate_all(chan);
+
 	if (iterations > 0)
 		while (!kthread_should_stop()) {
 			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
@@ -551,7 +563,7 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
 	}
 
 	/* terminate all transfers on specified channels */
-	dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);
+	dmaengine_terminate_all(dtc->chan);
 
 	kfree(dtc);
 }
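The min_odd() helper added to dmatest above keeps the source count odd while clamping it to what the device advertises (dev->max_xor, dma_maxpq()); the odd count matters because XORing an odd number of identically patterned sources reproduces the source pattern in the destination, which is what the test verifies against. A standalone behavior sketch, with illustrative values only:

```c
/* Standalone copy for illustration: largest odd value <= min(x, y). */
static unsigned int min_odd_demo(unsigned int x, unsigned int y)
{
	unsigned int val = x < y ? x : y;	/* open-coded min() */

	return val % 2 ? val : val - 1;
}

/* e.g. min_odd_demo(9, 16) == 9, min_odd_demo(9, 8) == 7 */
```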
@@ -1,6 +1,5 @@
 /*
- * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
- * AVR32 systems.)
+ * Core driver for the Synopsys DesignWare DMA Controller
  *
  * Copyright (C) 2007-2008 Atmel Corporation
  * Copyright (C) 2010-2011 ST Microelectronics
@@ -9,11 +8,13 @@
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
+
 #include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -47,15 +48,32 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
 	return slave ? slave->src_master : 1;
 }
 
+#define SRC_MASTER	0
+#define DST_MASTER	1
+
+static inline unsigned int dwc_get_master(struct dma_chan *chan, int master)
+{
+	struct dw_dma *dw = to_dw_dma(chan->device);
+	struct dw_dma_slave *dws = chan->private;
+	unsigned int m;
+
+	if (master == SRC_MASTER)
+		m = dwc_get_sms(dws);
+	else
+		m = dwc_get_dms(dws);
+
+	return min_t(unsigned int, dw->nr_masters - 1, m);
+}
+
 #define DWC_DEFAULT_CTLLO(_chan) ({				\
-		struct dw_dma_slave *__slave = (_chan->private);	\
 		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
 		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
-		int _dms = dwc_get_dms(__slave);		\
-		int _sms = dwc_get_sms(__slave);		\
-		u8 _smsize = __slave ? _sconfig->src_maxburst :	\
+		bool _is_slave = is_slave_direction(_dwc->direction);	\
+		int _dms = dwc_get_master(_chan, DST_MASTER);		\
+		int _sms = dwc_get_master(_chan, SRC_MASTER);		\
+		u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
 			DW_DMA_MSIZE_16;			\
-		u8 _dmsize = __slave ? _sconfig->dst_maxburst :	\
+		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
 			DW_DMA_MSIZE_16;			\
 								\
 		(DWC_CTLL_DST_MSIZE(_dmsize)			\
@@ -73,15 +91,14 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
  */
 #define NR_DESCS_PER_CHANNEL	64
 
-/*----------------------------------------------------------------------*/
+static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master)
+{
+	struct dw_dma *dw = to_dw_dma(chan->device);
 
-/*
- * Because we're not relying on writeback from the controller (it may not
- * even be configured into the core!) we don't need to use dma_pool. These
- * descriptors -- and associated data -- are cacheable. We do need to make
- * sure their dcache entries are written back before handing them off to
- * the controller, though.
- */
+	return dw->data_width[dwc_get_master(chan, master)];
+}
+
+/*----------------------------------------------------------------------*/
 
 static struct device *chan2dev(struct dma_chan *chan)
 {
@@ -94,7 +111,7 @@ static struct device *chan2parent(struct dma_chan *chan)
 
 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
 {
-	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
+	return to_dw_desc(dwc->active_list.next);
 }
 
 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
@@ -121,19 +138,6 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 	return ret;
 }
 
-static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
-{
-	struct dw_desc *child;
-
-	list_for_each_entry(child, &desc->tx_list, desc_node)
-		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
-				child->txd.phys, sizeof(child->lli),
-				DMA_TO_DEVICE);
-	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
-			desc->txd.phys, sizeof(desc->lli),
-			DMA_TO_DEVICE);
-}
-
 /*
  * Move a descriptor, including any children, to the free list.
  * `desc' must not be on any lists.
@@ -145,8 +149,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 	if (desc) {
 		struct dw_desc *child;
 
-		dwc_sync_desc_for_cpu(dwc, desc);
-
 		spin_lock_irqsave(&dwc->lock, flags);
 		list_for_each_entry(child, &desc->tx_list, desc_node)
 			dev_vdbg(chan2dev(&dwc->chan),
@@ -179,9 +181,9 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 		cfghi = dws->cfg_hi;
 		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
 	} else {
-		if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
+		if (dwc->direction == DMA_MEM_TO_DEV)
 			cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
-		else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
+		else if (dwc->direction == DMA_DEV_TO_MEM)
 			cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
 	}
 
|
|||||||
channel_readl(dwc, CTL_LO));
|
channel_readl(dwc, CTL_LO));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
||||||
{
|
{
|
||||||
channel_clear_bit(dw, CH_EN, dwc->mask);
|
channel_clear_bit(dw, CH_EN, dwc->mask);
|
||||||
@@ -249,6 +250,9 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
 	channel_writel(dwc, CTL_LO, ctllo);
 	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
 	channel_set_bit(dw, CH_EN, dwc->mask);
+
+	/* Move pointer to next descriptor */
+	dwc->tx_node_active = dwc->tx_node_active->next;
 }
 
 /* Called with dwc->lock held and bh disabled */
@@ -279,9 +283,10 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 
 		dwc_initialize(dwc);
 
-		dwc->tx_list = &first->tx_list;
-		dwc->tx_node_active = first->tx_list.next;
+		dwc->residue = first->total_len;
+		dwc->tx_node_active = &first->tx_list;
 
+		/* Submit first block */
 		dwc_do_single_block(dwc, first);
 
 		return;
@@ -317,8 +322,6 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
 		param = txd->callback_param;
 	}
 
-	dwc_sync_desc_for_cpu(dwc, desc);
-
 	/* async_tx_ack */
 	list_for_each_entry(child, &desc->tx_list, desc_node)
 		async_tx_ack(&child->txd);
@@ -327,29 +330,29 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
 	list_splice_init(&desc->tx_list, &dwc->free_list);
 	list_move(&desc->desc_node, &dwc->free_list);
 
-	if (!dwc->chan.private) {
+	if (!is_slave_direction(dwc->direction)) {
 		struct device *parent = chan2parent(&dwc->chan);
 		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
 			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
 				dma_unmap_single(parent, desc->lli.dar,
-						desc->len, DMA_FROM_DEVICE);
+						desc->total_len, DMA_FROM_DEVICE);
 			else
 				dma_unmap_page(parent, desc->lli.dar,
-						desc->len, DMA_FROM_DEVICE);
+						desc->total_len, DMA_FROM_DEVICE);
 		}
 		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
 			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
 				dma_unmap_single(parent, desc->lli.sar,
-						desc->len, DMA_TO_DEVICE);
+						desc->total_len, DMA_TO_DEVICE);
 			else
 				dma_unmap_page(parent, desc->lli.sar,
-						desc->len, DMA_TO_DEVICE);
+						desc->total_len, DMA_TO_DEVICE);
 		}
 	}
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
-	if (callback_required && callback)
+	if (callback)
 		callback(param);
 }
@@ -384,6 +387,15 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		dwc_descriptor_complete(dwc, desc, true);
 }
 
+/* Returns how many bytes were already received from source */
+static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
+{
+	u32 ctlhi = channel_readl(dwc, CTL_HI);
+	u32 ctllo = channel_readl(dwc, CTL_LO);
+
+	return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
+}
+
 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 {
 	dma_addr_t llp;
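The dwc_get_sent() helper introduced above turns two register reads into a byte count. Assuming the usual DesignWare CTL register layout (the BLOCK_TS field of CTL_HI counts completed source transfers, and bits 6:4 of CTL_LO hold SRC_TR_WIDTH as log2 of the item size in bytes; these field meanings come from the DesignWare databook, not from the patch itself), the math works out as in this illustrative sketch:

```c
#include <linux/types.h>

/* Illustrative re-derivation of the byte math in dwc_get_sent(). */
static u32 sent_bytes(u32 ctlhi, u32 ctllo, u32 block_ts_mask)
{
	u32 block_ts = ctlhi & block_ts_mask;	/* source transfers done */
	u32 width = ctllo >> 4 & 7;		/* SRC_TR_WIDTH: log2(bytes) */

	return block_ts * (1u << width);	/* e.g. 0x30 * 4 = 192 bytes */
}
```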
@@ -399,6 +411,39 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	if (status_xfer & dwc->mask) {
 		/* Everything we've submitted is done */
 		dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+			struct list_head *head, *active = dwc->tx_node_active;
+
+			/*
+			 * We are inside first active descriptor.
+			 * Otherwise something is really wrong.
+			 */
+			desc = dwc_first_active(dwc);
+
+			head = &desc->tx_list;
+			if (active != head) {
+				/* Update desc to reflect last sent one */
+				if (active != head->next)
+					desc = to_dw_desc(active->prev);
+
+				dwc->residue -= desc->len;
+
+				child = to_dw_desc(active);
+
+				/* Submit next block */
+				dwc_do_single_block(dwc, child);
+
+				spin_unlock_irqrestore(&dwc->lock, flags);
+				return;
+			}
+
+			/* We are done here */
+			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+		}
+
+		dwc->residue = 0;
+
 		spin_unlock_irqrestore(&dwc->lock, flags);
 
 		dwc_complete_all(dw, dwc);
@@ -406,6 +451,13 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	}
 
 	if (list_empty(&dwc->active_list)) {
+		dwc->residue = 0;
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return;
+	}
+
+	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
 		spin_unlock_irqrestore(&dwc->lock, flags);
 		return;
 	}
@@ -414,6 +466,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 			(unsigned long long)llp);
 
 	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
+		/* initial residue value */
+		dwc->residue = desc->total_len;
+
 		/* check first descriptors addr */
 		if (desc->txd.phys == llp) {
 			spin_unlock_irqrestore(&dwc->lock, flags);
@@ -423,16 +478,21 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		/* check first descriptors llp */
 		if (desc->lli.llp == llp) {
 			/* This one is currently in progress */
+			dwc->residue -= dwc_get_sent(dwc);
 			spin_unlock_irqrestore(&dwc->lock, flags);
 			return;
 		}
 
-		list_for_each_entry(child, &desc->tx_list, desc_node)
+		dwc->residue -= desc->len;
+		list_for_each_entry(child, &desc->tx_list, desc_node) {
 			if (child->lli.llp == llp) {
 				/* Currently in progress */
+				dwc->residue -= dwc_get_sent(dwc);
 				spin_unlock_irqrestore(&dwc->lock, flags);
 				return;
 			}
+			dwc->residue -= child->len;
+		}
 
 		/*
 		 * No descriptors so far seem to be in progress, i.e.
@@ -458,9 +518,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 
 static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
 {
-	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
-			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
-			lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
+	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
+		 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
 }
 
 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -488,16 +547,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		dwc_dostart(dwc, dwc_first_active(dwc));
 
 	/*
-	 * KERN_CRITICAL may seem harsh, but since this only happens
+	 * WARN may seem harsh, but since this only happens
 	 * when someone submits a bad physical address in a
 	 * descriptor, we should consider ourselves lucky that the
 	 * controller flagged an error instead of scribbling over
 	 * random memory locations.
 	 */
-	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
-			"Bad descriptor submitted for DMA!\n");
-	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
-			"  cookie: %d\n", bad_desc->txd.cookie);
+	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
+				       "  cookie: %d\n", bad_desc->txd.cookie);
 	dwc_dump_lli(dwc, &bad_desc->lli);
 	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
 		dwc_dump_lli(dwc, &child->lli);
@@ -598,36 +655,8 @@ static void dw_dma_tasklet(unsigned long data)
 			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
 		else if (status_err & (1 << i))
 			dwc_handle_error(dw, dwc);
-		else if (status_xfer & (1 << i)) {
-			unsigned long flags;
-
-			spin_lock_irqsave(&dwc->lock, flags);
-			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
-				if (dwc->tx_node_active != dwc->tx_list) {
-					struct dw_desc *desc =
-						list_entry(dwc->tx_node_active,
-							   struct dw_desc,
-							   desc_node);
-
-					dma_writel(dw, CLEAR.XFER, dwc->mask);
-
-					/* move pointer to next descriptor */
-					dwc->tx_node_active =
-						dwc->tx_node_active->next;
-
-					dwc_do_single_block(dwc, desc);
-
-					spin_unlock_irqrestore(&dwc->lock, flags);
-					continue;
-				} else {
-					/* we are done here */
-					clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
-				}
-			}
-			spin_unlock_irqrestore(&dwc->lock, flags);
-
+		else if (status_xfer & (1 << i))
 			dwc_scan_descriptors(dw, dwc);
-		}
 	}
 
 /*
@@ -709,7 +738,6 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		size_t len, unsigned long flags)
 {
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
-	struct dw_dma_slave	*dws = chan->private;
 	struct dw_desc		*desc;
 	struct dw_desc		*first;
 	struct dw_desc		*prev;
@@ -730,8 +758,10 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		return NULL;
 	}
 
-	data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)],
-			   dwc->dw->data_width[dwc_get_dms(dws)]);
+	dwc->direction = DMA_MEM_TO_MEM;
+
+	data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER),
+			   dwc_get_data_width(chan, DST_MASTER));
 
 	src_width = dst_width = min_t(unsigned int, data_width,
 				      dwc_fast_fls(src | dest | len));
@@ -756,32 +786,25 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		desc->lli.dar = dest + offset;
 		desc->lli.ctllo = ctllo;
 		desc->lli.ctlhi = xfer_count;
+		desc->len = xfer_count << src_width;
 
 		if (!first) {
 			first = desc;
 		} else {
 			prev->lli.llp = desc->txd.phys;
-			dma_sync_single_for_device(chan2parent(chan),
-					prev->txd.phys, sizeof(prev->lli),
-					DMA_TO_DEVICE);
 			list_add_tail(&desc->desc_node,
 					&first->tx_list);
 		}
 		prev = desc;
 	}
 
-
 	if (flags & DMA_PREP_INTERRUPT)
 		/* Trigger interrupt after last block */
 		prev->lli.ctllo |= DWC_CTLL_INT_EN;
 
 	prev->lli.llp = 0;
-	dma_sync_single_for_device(chan2parent(chan),
-			prev->txd.phys, sizeof(prev->lli),
-			DMA_TO_DEVICE);
 
 	first->txd.flags = flags;
-	first->len = len;
+	first->total_len = len;
 
 	return &first->txd;
@@ -796,7 +819,6 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned long flags, void *context)
 {
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
-	struct dw_dma_slave	*dws = chan->private;
 	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
 	struct dw_desc		*prev;
 	struct dw_desc		*first;
@@ -811,9 +833,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
-	if (unlikely(!dws || !sg_len))
+	if (unlikely(!is_slave_direction(direction) || !sg_len))
 		return NULL;
 
+	dwc->direction = direction;
+
 	prev = first = NULL;
 
 	switch (direction) {
@@ -828,7 +852,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
 			DWC_CTLL_FC(DW_DMA_FC_D_M2P);
 
-		data_width = dwc->dw->data_width[dwc_get_sms(dws)];
+		data_width = dwc_get_data_width(chan, SRC_MASTER);
 
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc	*desc;
@@ -861,15 +885,12 @@ slave_sg_todev_fill_desc:
 			}
 
 			desc->lli.ctlhi = dlen >> mem_width;
+			desc->len = dlen;
 
 			if (!first) {
 				first = desc;
 			} else {
 				prev->lli.llp = desc->txd.phys;
-				dma_sync_single_for_device(chan2parent(chan),
-						prev->txd.phys,
-						sizeof(prev->lli),
-						DMA_TO_DEVICE);
 				list_add_tail(&desc->desc_node,
 						&first->tx_list);
 			}
|
|||||||
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
|
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
|
||||||
DWC_CTLL_FC(DW_DMA_FC_D_P2M);
|
DWC_CTLL_FC(DW_DMA_FC_D_P2M);
|
||||||
|
|
||||||
data_width = dwc->dw->data_width[dwc_get_dms(dws)];
|
data_width = dwc_get_data_width(chan, DST_MASTER);
|
||||||
|
|
||||||
for_each_sg(sgl, sg, sg_len, i) {
|
for_each_sg(sgl, sg, sg_len, i) {
|
||||||
struct dw_desc *desc;
|
struct dw_desc *desc;
|
||||||
@ -923,15 +944,12 @@ slave_sg_fromdev_fill_desc:
|
|||||||
len = 0;
|
len = 0;
|
||||||
}
|
}
|
||||||
desc->lli.ctlhi = dlen >> reg_width;
|
desc->lli.ctlhi = dlen >> reg_width;
|
||||||
|
desc->len = dlen;
|
||||||
|
|
||||||
if (!first) {
|
if (!first) {
|
||||||
first = desc;
|
first = desc;
|
||||||
} else {
|
} else {
|
||||||
prev->lli.llp = desc->txd.phys;
|
prev->lli.llp = desc->txd.phys;
|
||||||
dma_sync_single_for_device(chan2parent(chan),
|
|
||||||
prev->txd.phys,
|
|
||||||
sizeof(prev->lli),
|
|
||||||
DMA_TO_DEVICE);
|
|
||||||
list_add_tail(&desc->desc_node,
|
list_add_tail(&desc->desc_node,
|
||||||
&first->tx_list);
|
&first->tx_list);
|
||||||
}
|
}
|
||||||
@ -951,11 +969,7 @@ slave_sg_fromdev_fill_desc:
|
|||||||
prev->lli.ctllo |= DWC_CTLL_INT_EN;
|
prev->lli.ctllo |= DWC_CTLL_INT_EN;
|
||||||
|
|
||||||
prev->lli.llp = 0;
|
prev->lli.llp = 0;
|
||||||
dma_sync_single_for_device(chan2parent(chan),
|
first->total_len = total_len;
|
||||||
prev->txd.phys, sizeof(prev->lli),
|
|
||||||
DMA_TO_DEVICE);
|
|
||||||
|
|
||||||
first->len = total_len;
|
|
||||||
|
|
||||||
return &first->txd;
|
return &first->txd;
|
||||||
|
|
||||||
@@ -985,11 +999,12 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 
-	/* Check if it is chan is configured for slave transfers */
-	if (!chan->private)
+	/* Check if chan will be configured for slave transfers */
+	if (!is_slave_direction(sconfig->direction))
 		return -EINVAL;
 
 	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+	dwc->direction = sconfig->direction;
 
 	convert_burst(&dwc->dma_sconfig.src_maxburst);
 	convert_burst(&dwc->dma_sconfig.dst_maxburst);
@@ -997,6 +1012,26 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 	return 0;
 }
 
+static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
+{
+	u32 cfglo = channel_readl(dwc, CFG_LO);
+
+	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+		cpu_relax();
+
+	dwc->paused = true;
+}
+
+static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
+{
+	u32 cfglo = channel_readl(dwc, CFG_LO);
+
+	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+
+	dwc->paused = false;
+}
+
 static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		       unsigned long arg)
 {
@ -1004,18 +1039,13 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
|||||||
struct dw_dma *dw = to_dw_dma(chan->device);
|
struct dw_dma *dw = to_dw_dma(chan->device);
|
||||||
struct dw_desc *desc, *_desc;
|
struct dw_desc *desc, *_desc;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
u32 cfglo;
|
|
||||||
LIST_HEAD(list);
|
LIST_HEAD(list);
|
||||||
|
|
||||||
if (cmd == DMA_PAUSE) {
|
if (cmd == DMA_PAUSE) {
|
||||||
spin_lock_irqsave(&dwc->lock, flags);
|
spin_lock_irqsave(&dwc->lock, flags);
|
||||||
|
|
||||||
cfglo = channel_readl(dwc, CFG_LO);
|
dwc_chan_pause(dwc);
|
||||||
channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
|
|
||||||
while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
|
|
||||||
cpu_relax();
|
|
||||||
|
|
||||||
dwc->paused = true;
|
|
||||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||||
} else if (cmd == DMA_RESUME) {
|
} else if (cmd == DMA_RESUME) {
|
||||||
if (!dwc->paused)
|
if (!dwc->paused)
|
||||||
@ -1023,9 +1053,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
|||||||
|
|
||||||
spin_lock_irqsave(&dwc->lock, flags);
|
spin_lock_irqsave(&dwc->lock, flags);
|
||||||
|
|
||||||
cfglo = channel_readl(dwc, CFG_LO);
|
dwc_chan_resume(dwc);
|
||||||
channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
|
|
||||||
dwc->paused = false;
|
|
||||||
|
|
||||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||||
} else if (cmd == DMA_TERMINATE_ALL) {
|
} else if (cmd == DMA_TERMINATE_ALL) {
|
||||||
@ -1035,7 +1063,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
|||||||
|
|
||||||
dwc_chan_disable(dw, dwc);
|
dwc_chan_disable(dw, dwc);
|
||||||
|
|
||||||
dwc->paused = false;
|
dwc_chan_resume(dwc);
|
||||||
|
|
||||||
/* active_list entries will end up before queued entries */
|
/* active_list entries will end up before queued entries */
|
||||||
list_splice_init(&dwc->queue, &list);
|
list_splice_init(&dwc->queue, &list);
|
||||||
@ -1055,6 +1083,21 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
|
||||||
|
{
|
||||||
|
unsigned long flags;
|
||||||
|
u32 residue;
|
||||||
|
|
||||||
|
spin_lock_irqsave(&dwc->lock, flags);
|
||||||
|
|
||||||
|
residue = dwc->residue;
|
||||||
|
if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
|
||||||
|
residue -= dwc_get_sent(dwc);
|
||||||
|
|
||||||
|
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||||
|
return residue;
|
||||||
|
}
|
||||||
|
|
||||||
static enum dma_status
|
static enum dma_status
|
||||||
dwc_tx_status(struct dma_chan *chan,
|
dwc_tx_status(struct dma_chan *chan,
|
||||||
dma_cookie_t cookie,
|
dma_cookie_t cookie,
|
||||||
@ -1071,7 +1114,7 @@ dwc_tx_status(struct dma_chan *chan,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (ret != DMA_SUCCESS)
|
if (ret != DMA_SUCCESS)
|
||||||
dma_set_residue(txstate, dwc_first_active(dwc)->len);
|
dma_set_residue(txstate, dwc_get_residue(dwc));
|
||||||
|
|
||||||
if (dwc->paused)
|
if (dwc->paused)
|
||||||
return DMA_PAUSED;
|
return DMA_PAUSED;
|
||||||
@ -1114,22 +1157,22 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
|
|||||||
spin_lock_irqsave(&dwc->lock, flags);
|
spin_lock_irqsave(&dwc->lock, flags);
|
||||||
i = dwc->descs_allocated;
|
i = dwc->descs_allocated;
|
||||||
while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
|
while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
|
||||||
|
dma_addr_t phys;
|
||||||
|
|
||||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||||
|
|
||||||
desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
|
desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
|
||||||
if (!desc) {
|
if (!desc)
|
||||||
dev_info(chan2dev(chan),
|
goto err_desc_alloc;
|
||||||
"only allocated %d descriptors\n", i);
|
|
||||||
spin_lock_irqsave(&dwc->lock, flags);
|
memset(desc, 0, sizeof(struct dw_desc));
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
INIT_LIST_HEAD(&desc->tx_list);
|
INIT_LIST_HEAD(&desc->tx_list);
|
||||||
dma_async_tx_descriptor_init(&desc->txd, chan);
|
dma_async_tx_descriptor_init(&desc->txd, chan);
|
||||||
desc->txd.tx_submit = dwc_tx_submit;
|
desc->txd.tx_submit = dwc_tx_submit;
|
||||||
desc->txd.flags = DMA_CTRL_ACK;
|
desc->txd.flags = DMA_CTRL_ACK;
|
||||||
desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
|
desc->txd.phys = phys;
|
||||||
sizeof(desc->lli), DMA_TO_DEVICE);
|
|
||||||
dwc_desc_put(dwc, desc);
|
dwc_desc_put(dwc, desc);
|
||||||
|
|
||||||
spin_lock_irqsave(&dwc->lock, flags);
|
spin_lock_irqsave(&dwc->lock, flags);
|
||||||
@ -1140,6 +1183,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
|
|||||||
|
|
||||||
dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
|
dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
|
||||||
|
|
||||||
|
return i;
|
||||||
|
|
||||||
|
err_desc_alloc:
|
||||||
|
dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
|
||||||
|
|
||||||
return i;
|
return i;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1172,14 +1220,56 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
|
|||||||
|
|
||||||
list_for_each_entry_safe(desc, _desc, &list, desc_node) {
|
list_for_each_entry_safe(desc, _desc, &list, desc_node) {
|
||||||
dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
|
dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
|
||||||
dma_unmap_single(chan2parent(chan), desc->txd.phys,
|
dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
|
||||||
sizeof(desc->lli), DMA_TO_DEVICE);
|
|
||||||
kfree(desc);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
|
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
|
||||||
|
{
|
||||||
|
struct dw_dma *dw = to_dw_dma(chan->device);
|
||||||
|
static struct dw_dma *last_dw;
|
||||||
|
static char *last_bus_id;
|
||||||
|
int i = -1;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* dmaengine framework calls this routine for all channels of all dma
|
||||||
|
* controller, until true is returned. If 'param' bus_id is not
|
||||||
|
* registered with a dma controller (dw), then there is no need of
|
||||||
|
* running below function for all channels of dw.
|
||||||
|
*
|
||||||
|
* This block of code does this by saving the parameters of last
|
||||||
|
* failure. If dw and param are same, i.e. trying on same dw with
|
||||||
|
* different channel, return false.
|
||||||
|
*/
|
||||||
|
if ((last_dw == dw) && (last_bus_id == param))
|
||||||
|
return false;
|
||||||
|
/*
|
||||||
|
* Return true:
|
||||||
|
* - If dw_dma's platform data is not filled with slave info, then all
|
||||||
|
* dma controllers are fine for transfer.
|
||||||
|
* - Or if param is NULL
|
||||||
|
*/
|
||||||
|
if (!dw->sd || !param)
|
||||||
|
return true;
|
||||||
|
|
||||||
|
while (++i < dw->sd_count) {
|
||||||
|
if (!strcmp(dw->sd[i].bus_id, param)) {
|
||||||
|
chan->private = &dw->sd[i];
|
||||||
|
last_dw = NULL;
|
||||||
|
last_bus_id = NULL;
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
last_dw = dw;
|
||||||
|
last_bus_id = param;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(dw_dma_generic_filter);
|
||||||
|
|
||||||
/* --------------------- Cyclic DMA API extensions -------------------- */
|
/* --------------------- Cyclic DMA API extensions -------------------- */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -1299,6 +1389,11 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
|
|||||||
|
|
||||||
retval = ERR_PTR(-EINVAL);
|
retval = ERR_PTR(-EINVAL);
|
||||||
|
|
||||||
|
if (unlikely(!is_slave_direction(direction)))
|
||||||
|
goto out_err;
|
||||||
|
|
||||||
|
dwc->direction = direction;
|
||||||
|
|
||||||
if (direction == DMA_MEM_TO_DEV)
|
if (direction == DMA_MEM_TO_DEV)
|
||||||
reg_width = __ffs(sconfig->dst_addr_width);
|
reg_width = __ffs(sconfig->dst_addr_width);
|
||||||
else
|
else
|
||||||
@ -1313,8 +1408,6 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
|
|||||||
goto out_err;
|
goto out_err;
|
||||||
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
|
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
|
||||||
goto out_err;
|
goto out_err;
|
||||||
if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
|
|
||||||
goto out_err;
|
|
||||||
|
|
||||||
retval = ERR_PTR(-ENOMEM);
|
retval = ERR_PTR(-ENOMEM);
|
||||||
|
|
||||||
@ -1372,20 +1465,14 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
|
|||||||
desc->lli.ctlhi = (period_len >> reg_width);
|
desc->lli.ctlhi = (period_len >> reg_width);
|
||||||
cdesc->desc[i] = desc;
|
cdesc->desc[i] = desc;
|
||||||
|
|
||||||
if (last) {
|
if (last)
|
||||||
last->lli.llp = desc->txd.phys;
|
last->lli.llp = desc->txd.phys;
|
||||||
dma_sync_single_for_device(chan2parent(chan),
|
|
||||||
last->txd.phys, sizeof(last->lli),
|
|
||||||
DMA_TO_DEVICE);
|
|
||||||
}
|
|
||||||
|
|
||||||
last = desc;
|
last = desc;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* lets make a cyclic list */
|
/* lets make a cyclic list */
|
||||||
last->lli.llp = cdesc->desc[0]->txd.phys;
|
last->lli.llp = cdesc->desc[0]->txd.phys;
|
||||||
dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
|
|
||||||
sizeof(last->lli), DMA_TO_DEVICE);
|
|
||||||
|
|
||||||
dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
|
dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
|
||||||
"period %zu periods %d\n", (unsigned long long)buf_addr,
|
"period %zu periods %d\n", (unsigned long long)buf_addr,
|
||||||
@ -1463,6 +1550,91 @@ static void dw_dma_off(struct dw_dma *dw)
|
|||||||
dw->chan[i].initialized = false;
|
dw->chan[i].initialized = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef CONFIG_OF
|
||||||
|
static struct dw_dma_platform_data *
|
||||||
|
dw_dma_parse_dt(struct platform_device *pdev)
|
||||||
|
{
|
||||||
|
struct device_node *sn, *cn, *np = pdev->dev.of_node;
|
||||||
|
struct dw_dma_platform_data *pdata;
|
||||||
|
struct dw_dma_slave *sd;
|
||||||
|
u32 tmp, arr[4];
|
||||||
|
|
||||||
|
if (!np) {
|
||||||
|
dev_err(&pdev->dev, "Missing DT data\n");
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
|
||||||
|
if (!pdata)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
if (of_property_read_u32(np, "nr_channels", &pdata->nr_channels))
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
if (of_property_read_bool(np, "is_private"))
|
||||||
|
pdata->is_private = true;
|
||||||
|
|
||||||
|
if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
|
||||||
|
pdata->chan_allocation_order = (unsigned char)tmp;
|
||||||
|
|
||||||
|
if (!of_property_read_u32(np, "chan_priority", &tmp))
|
||||||
|
pdata->chan_priority = tmp;
|
||||||
|
|
||||||
|
if (!of_property_read_u32(np, "block_size", &tmp))
|
||||||
|
pdata->block_size = tmp;
|
||||||
|
|
||||||
|
if (!of_property_read_u32(np, "nr_masters", &tmp)) {
|
||||||
|
if (tmp > 4)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
pdata->nr_masters = tmp;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!of_property_read_u32_array(np, "data_width", arr,
|
||||||
|
pdata->nr_masters))
|
||||||
|
for (tmp = 0; tmp < pdata->nr_masters; tmp++)
|
||||||
|
pdata->data_width[tmp] = arr[tmp];
|
||||||
|
|
||||||
|
/* parse slave data */
|
||||||
|
sn = of_find_node_by_name(np, "slave_info");
|
||||||
|
if (!sn)
|
||||||
|
return pdata;
|
||||||
|
|
||||||
|
/* calculate number of slaves */
|
||||||
|
tmp = of_get_child_count(sn);
|
||||||
|
if (!tmp)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
sd = devm_kzalloc(&pdev->dev, sizeof(*sd) * tmp, GFP_KERNEL);
|
||||||
|
if (!sd)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
pdata->sd = sd;
|
||||||
|
pdata->sd_count = tmp;
|
||||||
|
|
||||||
|
for_each_child_of_node(sn, cn) {
|
||||||
|
sd->dma_dev = &pdev->dev;
|
||||||
|
of_property_read_string(cn, "bus_id", &sd->bus_id);
|
||||||
|
of_property_read_u32(cn, "cfg_hi", &sd->cfg_hi);
|
||||||
|
of_property_read_u32(cn, "cfg_lo", &sd->cfg_lo);
|
||||||
|
if (!of_property_read_u32(cn, "src_master", &tmp))
|
||||||
|
sd->src_master = tmp;
|
||||||
|
|
||||||
|
if (!of_property_read_u32(cn, "dst_master", &tmp))
|
||||||
|
sd->dst_master = tmp;
|
||||||
|
sd++;
|
||||||
|
}
|
||||||
|
|
||||||
|
return pdata;
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
static inline struct dw_dma_platform_data *
|
||||||
|
dw_dma_parse_dt(struct platform_device *pdev)
|
||||||
|
{
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
static int dw_probe(struct platform_device *pdev)
|
static int dw_probe(struct platform_device *pdev)
|
||||||
{
|
{
|
||||||
struct dw_dma_platform_data *pdata;
|
struct dw_dma_platform_data *pdata;
|
||||||
@ -1478,10 +1650,6 @@ static int dw_probe(struct platform_device *pdev)
|
|||||||
int err;
|
int err;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
pdata = dev_get_platdata(&pdev->dev);
|
|
||||||
if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
|
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||||
if (!io)
|
if (!io)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
@ -1494,9 +1662,33 @@ static int dw_probe(struct platform_device *pdev)
|
|||||||
if (IS_ERR(regs))
|
if (IS_ERR(regs))
|
||||||
return PTR_ERR(regs);
|
return PTR_ERR(regs);
|
||||||
|
|
||||||
|
/* Apply default dma_mask if needed */
|
||||||
|
if (!pdev->dev.dma_mask) {
|
||||||
|
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
|
||||||
|
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
|
||||||
|
}
|
||||||
|
|
||||||
dw_params = dma_read_byaddr(regs, DW_PARAMS);
|
dw_params = dma_read_byaddr(regs, DW_PARAMS);
|
||||||
autocfg = dw_params >> DW_PARAMS_EN & 0x1;
|
autocfg = dw_params >> DW_PARAMS_EN & 0x1;
|
||||||
|
|
||||||
|
dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params);
|
||||||
|
|
||||||
|
pdata = dev_get_platdata(&pdev->dev);
|
||||||
|
if (!pdata)
|
||||||
|
pdata = dw_dma_parse_dt(pdev);
|
||||||
|
|
||||||
|
if (!pdata && autocfg) {
|
||||||
|
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
|
||||||
|
if (!pdata)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
/* Fill platform data with the default values */
|
||||||
|
pdata->is_private = true;
|
||||||
|
pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
|
||||||
|
pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
|
||||||
|
} else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
if (autocfg)
|
if (autocfg)
|
||||||
nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
|
nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
|
||||||
else
|
else
|
||||||
@ -1513,6 +1705,8 @@ static int dw_probe(struct platform_device *pdev)
|
|||||||
clk_prepare_enable(dw->clk);
|
clk_prepare_enable(dw->clk);
|
||||||
|
|
||||||
dw->regs = regs;
|
dw->regs = regs;
|
||||||
|
dw->sd = pdata->sd;
|
||||||
|
dw->sd_count = pdata->sd_count;
|
||||||
|
|
||||||
/* get hardware configuration parameters */
|
/* get hardware configuration parameters */
|
||||||
if (autocfg) {
|
if (autocfg) {
|
||||||
@ -1544,6 +1738,14 @@ static int dw_probe(struct platform_device *pdev)
|
|||||||
|
|
||||||
platform_set_drvdata(pdev, dw);
|
platform_set_drvdata(pdev, dw);
|
||||||
|
|
||||||
|
/* create a pool of consistent memory blocks for hardware descriptors */
|
||||||
|
dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
|
||||||
|
sizeof(struct dw_desc), 4, 0);
|
||||||
|
if (!dw->desc_pool) {
|
||||||
|
dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
|
||||||
|
return -ENOMEM;
|
||||||
|
}
|
||||||
|
|
||||||
tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
|
tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
|
||||||
|
|
||||||
INIT_LIST_HEAD(&dw->dma.channels);
|
INIT_LIST_HEAD(&dw->dma.channels);
|
||||||
@ -1575,7 +1777,7 @@ static int dw_probe(struct platform_device *pdev)
|
|||||||
|
|
||||||
channel_clear_bit(dw, CH_EN, dwc->mask);
|
channel_clear_bit(dw, CH_EN, dwc->mask);
|
||||||
|
|
||||||
dwc->dw = dw;
|
dwc->direction = DMA_TRANS_NONE;
|
||||||
|
|
||||||
/* hardware configuration */
|
/* hardware configuration */
|
||||||
if (autocfg) {
|
if (autocfg) {
|
||||||
@ -1584,6 +1786,9 @@ static int dw_probe(struct platform_device *pdev)
|
|||||||
dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
|
dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
|
||||||
DWC_PARAMS);
|
DWC_PARAMS);
|
||||||
|
|
||||||
|
dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
|
||||||
|
dwc_params);
|
||||||
|
|
||||||
/* Decode maximum block size for given channel. The
|
/* Decode maximum block size for given channel. The
|
||||||
* stored 4 bit value represents blocks from 0x00 for 3
|
* stored 4 bit value represents blocks from 0x00 for 3
|
||||||
* up to 0x0a for 4095. */
|
* up to 0x0a for 4095. */
|
||||||
@ -1627,8 +1832,8 @@ static int dw_probe(struct platform_device *pdev)
|
|||||||
|
|
||||||
dma_writel(dw, CFG, DW_CFG_DMA_EN);
|
dma_writel(dw, CFG, DW_CFG_DMA_EN);
|
||||||
|
|
||||||
printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
|
dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
|
||||||
dev_name(&pdev->dev), nr_channels);
|
nr_channels);
|
||||||
|
|
||||||
dma_async_device_register(&dw->dma);
|
dma_async_device_register(&dw->dma);
|
||||||
|
|
||||||
@ -1658,7 +1863,7 @@ static void dw_shutdown(struct platform_device *pdev)
|
|||||||
{
|
{
|
||||||
struct dw_dma *dw = platform_get_drvdata(pdev);
|
struct dw_dma *dw = platform_get_drvdata(pdev);
|
||||||
|
|
||||||
dw_dma_off(platform_get_drvdata(pdev));
|
dw_dma_off(dw);
|
||||||
clk_disable_unprepare(dw->clk);
|
clk_disable_unprepare(dw->clk);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1667,7 +1872,7 @@ static int dw_suspend_noirq(struct device *dev)
|
|||||||
struct platform_device *pdev = to_platform_device(dev);
|
struct platform_device *pdev = to_platform_device(dev);
|
||||||
struct dw_dma *dw = platform_get_drvdata(pdev);
|
struct dw_dma *dw = platform_get_drvdata(pdev);
|
||||||
|
|
||||||
dw_dma_off(platform_get_drvdata(pdev));
|
dw_dma_off(dw);
|
||||||
clk_disable_unprepare(dw->clk);
|
clk_disable_unprepare(dw->clk);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
@ -1680,6 +1885,7 @@ static int dw_resume_noirq(struct device *dev)
|
|||||||
|
|
||||||
clk_prepare_enable(dw->clk);
|
clk_prepare_enable(dw->clk);
|
||||||
dma_writel(dw, CFG, DW_CFG_DMA_EN);
|
dma_writel(dw, CFG, DW_CFG_DMA_EN);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1700,7 +1906,13 @@ static const struct of_device_id dw_dma_id_table[] = {
|
|||||||
MODULE_DEVICE_TABLE(of, dw_dma_id_table);
|
MODULE_DEVICE_TABLE(of, dw_dma_id_table);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
static const struct platform_device_id dw_dma_ids[] = {
|
||||||
|
{ "INTL9C60", 0 },
|
||||||
|
{ }
|
||||||
|
};
|
||||||
|
|
||||||
static struct platform_driver dw_driver = {
|
static struct platform_driver dw_driver = {
|
||||||
|
.probe = dw_probe,
|
||||||
.remove = dw_remove,
|
.remove = dw_remove,
|
||||||
.shutdown = dw_shutdown,
|
.shutdown = dw_shutdown,
|
||||||
.driver = {
|
.driver = {
|
||||||
@ -1708,11 +1920,12 @@ static struct platform_driver dw_driver = {
|
|||||||
.pm = &dw_dev_pm_ops,
|
.pm = &dw_dev_pm_ops,
|
||||||
.of_match_table = of_match_ptr(dw_dma_id_table),
|
.of_match_table = of_match_ptr(dw_dma_id_table),
|
||||||
},
|
},
|
||||||
|
.id_table = dw_dma_ids,
|
||||||
};
|
};
|
||||||
|
|
||||||
static int __init dw_init(void)
|
static int __init dw_init(void)
|
||||||
{
|
{
|
||||||
return platform_driver_probe(&dw_driver, dw_probe);
|
return platform_driver_register(&dw_driver);
|
||||||
}
|
}
|
||||||
subsys_initcall(dw_init);
|
subsys_initcall(dw_init);
|
||||||
|
|
||||||
|
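The descriptor-allocation rework above is the standard conversion from kzalloc() plus a streaming dma_map_single() (which forced a dma_sync_single_for_device() after every CPU update of an LLI) to a pool of coherent memory. The pattern in isolation, as a minimal sketch with hypothetical my_lli names:

	#include <linux/device.h>
	#include <linux/dmapool.h>
	#include <linux/string.h>

	struct my_lli {
		u32 sar, dar, llp, ctl;	/* hardware-visible link item */
	};

	static int my_alloc_lli(struct device *dev)
	{
		struct dma_pool *pool;
		struct my_lli *lli;
		dma_addr_t phys;

		/* Managed pool of coherent blocks; CPU writes become visible
		 * to the device without explicit sync calls. */
		pool = dmam_pool_create("my_lli_pool", dev,
					sizeof(*lli), 4, 0);
		if (!pool)
			return -ENOMEM;

		lli = dma_pool_alloc(pool, GFP_ATOMIC, &phys);
		if (!lli)
			return -ENOMEM;
		memset(lli, 0, sizeof(*lli));	/* pool memory is not zeroed */

		/* ... program the controller with 'phys' ... */

		dma_pool_free(pool, lli, phys);
		return 0;
	}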
drivers/dma/dw_dmac_regs.h
@@ -9,6 +9,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/dmaengine.h>
 #include <linux/dw_dmac.h>
 
 #define DW_DMA_MAX_NR_CHANNELS	8
@@ -184,15 +185,15 @@ enum dw_dmac_flags {
 };
 
 struct dw_dma_chan {
 	struct dma_chan			chan;
 	void __iomem			*ch_regs;
 	u8				mask;
 	u8				priority;
+	enum dma_transfer_direction	direction;
 	bool				paused;
 	bool				initialized;
 
 	/* software emulation of the LLP transfers */
-	struct list_head	*tx_list;
 	struct list_head	*tx_node_active;
 
 	spinlock_t		lock;
@@ -202,6 +203,7 @@ struct dw_dma_chan {
 	struct list_head	active_list;
 	struct list_head	queue;
 	struct list_head	free_list;
+	u32			residue;
 	struct dw_cyclic_desc	*cdesc;
 
 	unsigned int		descs_allocated;
@@ -212,9 +214,6 @@ struct dw_dma_chan {
 
 	/* configuration passed via DMA_SLAVE_CONFIG */
 	struct dma_slave_config dma_sconfig;
-
-	/* backlink to dw_dma */
-	struct dw_dma		*dw;
 };
 
 static inline struct dw_dma_chan_regs __iomem *
@@ -236,9 +235,14 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
 struct dw_dma {
 	struct dma_device	dma;
 	void __iomem		*regs;
+	struct dma_pool		*desc_pool;
 	struct tasklet_struct	tasklet;
 	struct clk		*clk;
 
+	/* slave information */
+	struct dw_dma_slave	*sd;
+	unsigned int		sd_count;
+
 	u8			all_chan_mask;
 
 	/* hardware configuration */
@@ -293,8 +297,11 @@ struct dw_desc {
 	struct list_head	tx_list;
 	struct dma_async_tx_descriptor	txd;
 	size_t			len;
+	size_t			total_len;
 };
 
+#define to_dw_desc(h)	list_entry(h, struct dw_desc, desc_node)
+
 static inline struct dw_desc *
txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
 {
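The new to_dw_desc() macro is the usual list_entry()/container_of() idiom: given the desc_node a descriptor sits on, recover the enclosing struct dw_desc. For illustration only, assuming the definitions above:

	/* Sketch: first descriptor on the channel's active list. */
	static struct dw_desc *first_active(struct dw_dma_chan *dwc)
	{
		return to_dw_desc(dwc->active_list.next);
	}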
drivers/dma/edma.c
@@ -69,9 +69,7 @@ struct edma_chan {
 	int				ch_num;
 	bool				alloced;
 	int				slot[EDMA_MAX_SLOTS];
-	dma_addr_t			addr;
-	int				addr_width;
-	int				maxburst;
+	struct dma_slave_config		cfg;
 };
 
 struct edma_cc {
@@ -178,29 +176,14 @@ static int edma_terminate_all(struct edma_chan *echan)
 	return 0;
 }
 
-
 static int edma_slave_config(struct edma_chan *echan,
-	struct dma_slave_config *config)
+	struct dma_slave_config *cfg)
 {
-	if ((config->src_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) ||
-	    (config->dst_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
+	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
 		return -EINVAL;
 
-	if (config->direction == DMA_MEM_TO_DEV) {
-		if (config->dst_addr)
-			echan->addr = config->dst_addr;
-		if (config->dst_addr_width)
-			echan->addr_width = config->dst_addr_width;
-		if (config->dst_maxburst)
-			echan->maxburst = config->dst_maxburst;
-	} else if (config->direction == DMA_DEV_TO_MEM) {
-		if (config->src_addr)
-			echan->addr = config->src_addr;
-		if (config->src_addr_width)
-			echan->addr_width = config->src_addr_width;
-		if (config->src_maxburst)
-			echan->maxburst = config->src_maxburst;
-	}
+	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));
 
 	return 0;
 }
@@ -235,6 +218,9 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 	struct edma_chan *echan = to_edma_chan(chan);
 	struct device *dev = chan->device->dev;
 	struct edma_desc *edesc;
+	dma_addr_t dev_addr;
+	enum dma_slave_buswidth dev_width;
+	u32 burst;
 	struct scatterlist *sg;
 	int i;
 	int acnt, bcnt, ccnt, src, dst, cidx;
@@ -243,7 +229,20 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 	if (unlikely(!echan || !sgl || !sg_len))
 		return NULL;
 
-	if (echan->addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
+	if (direction == DMA_DEV_TO_MEM) {
+		dev_addr = echan->cfg.src_addr;
+		dev_width = echan->cfg.src_addr_width;
+		burst = echan->cfg.src_maxburst;
+	} else if (direction == DMA_MEM_TO_DEV) {
+		dev_addr = echan->cfg.dst_addr;
+		dev_width = echan->cfg.dst_addr_width;
+		burst = echan->cfg.dst_maxburst;
+	} else {
+		dev_err(dev, "%s: bad direction?\n", __func__);
+		return NULL;
+	}
+
+	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
 		dev_err(dev, "Undefined slave buswidth\n");
 		return NULL;
 	}
@@ -275,14 +274,14 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 		}
 	}
 
-	acnt = echan->addr_width;
+	acnt = dev_width;
 
 	/*
 	 * If the maxburst is equal to the fifo width, use
 	 * A-synced transfers. This allows for large contiguous
 	 * buffer transfers using only one PaRAM set.
 	 */
-	if (echan->maxburst == 1) {
+	if (burst == 1) {
 		edesc->absync = false;
 		ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
 		bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
@@ -302,7 +301,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 		 */
 		} else {
 			edesc->absync = true;
-			bcnt = echan->maxburst;
+			bcnt = burst;
 			ccnt = sg_dma_len(sg) / (acnt * bcnt);
 			if (ccnt > (SZ_64K - 1)) {
 				dev_err(dev, "Exceeded max SG segment size\n");
@@ -313,13 +312,13 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 
 		if (direction == DMA_MEM_TO_DEV) {
 			src = sg_dma_address(sg);
-			dst = echan->addr;
+			dst = dev_addr;
 			src_bidx = acnt;
 			src_cidx = cidx;
 			dst_bidx = 0;
 			dst_cidx = 0;
 		} else {
-			src = echan->addr;
+			src = dev_addr;
 			dst = sg_dma_address(sg);
 			src_bidx = 0;
 			src_cidx = 0;
@@ -621,13 +620,11 @@ static struct platform_device *pdev0, *pdev1;
 static const struct platform_device_info edma_dev_info0 = {
 	.name = "edma-dma-engine",
 	.id = 0,
-	.dma_mask = DMA_BIT_MASK(32),
 };
 
 static const struct platform_device_info edma_dev_info1 = {
 	.name = "edma-dma-engine",
 	.id = 1,
-	.dma_mask = DMA_BIT_MASK(32),
 };
 
 static int edma_init(void)
@@ -641,6 +638,8 @@ static int edma_init(void)
 			ret = PTR_ERR(pdev0);
 			goto out;
 		}
+		pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
+		pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 	}
 
 	if (EDMA_CTLRS == 2) {
@@ -650,6 +649,8 @@ static int edma_init(void)
 			platform_device_unregister(pdev0);
 			ret = PTR_ERR(pdev1);
 		}
+		pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
+		pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 	}
 
 out:
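The edma change stops collapsing dma_slave_config into per-channel fields at config time; it keeps the whole struct and selects the device-side address, width and burst by transfer direction when the descriptor is prepared. The selection step as a stand-alone sketch (names are illustrative):

	#include <linux/dmaengine.h>

	static int pick_slave_params(struct dma_slave_config *cfg,
				     enum dma_transfer_direction dir,
				     dma_addr_t *addr,
				     enum dma_slave_buswidth *width,
				     u32 *burst)
	{
		if (dir == DMA_DEV_TO_MEM) {
			*addr = cfg->src_addr;
			*width = cfg->src_addr_width;
			*burst = cfg->src_maxburst;
		} else if (dir == DMA_MEM_TO_DEV) {
			*addr = cfg->dst_addr;
			*width = cfg->dst_addr_width;
			*burst = cfg->dst_maxburst;
		} else {
			return -EINVAL;	/* memcpy etc. have no slave side */
		}
		return 0;
	}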
drivers/dma/ep93xx_dma.c
@@ -903,8 +903,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
 	switch (data->port) {
 	case EP93XX_DMA_SSP:
 	case EP93XX_DMA_IDE:
-		if (data->direction != DMA_MEM_TO_DEV &&
-		    data->direction != DMA_DEV_TO_MEM)
+		if (!is_slave_direction(data->direction))
 			return -EINVAL;
 		break;
 	default:
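Several drivers in this series replace open-coded direction checks with the is_slave_direction() helper this series adds to dmaengine.h; it reads roughly as:

	static inline bool is_slave_direction(enum dma_transfer_direction direction)
	{
		return (direction == DMA_MEM_TO_DEV) ||
		       (direction == DMA_DEV_TO_MEM);
	}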
drivers/dma/ioat/dma.c
@@ -833,14 +833,14 @@ int ioat_dma_self_test(struct ioatdma_device *device)
 
 	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
 	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
-	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
+	flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
 		DMA_PREP_INTERRUPT;
 	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
 						   IOAT_TEST_SIZE, flags);
 	if (!tx) {
 		dev_err(dev, "Self-test prep failed, disabling\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto unmap_dma;
 	}
 
 	async_tx_ack(tx);
@@ -851,7 +851,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
 	if (cookie < 0) {
 		dev_err(dev, "Self-test setup failed, disabling\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto unmap_dma;
 	}
 	dma->device_issue_pending(dma_chan);
 
@@ -862,7 +862,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
 					!= DMA_SUCCESS) {
 		dev_err(dev, "Self-test copy timed out, disabling\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto unmap_dma;
 	}
 	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
 		dev_err(dev, "Self-test copy failed compare, disabling\n");
@@ -870,6 +870,9 @@ int ioat_dma_self_test(struct ioatdma_device *device)
 		goto free_resources;
 	}
 
+unmap_dma:
+	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
 free_resources:
 	dma->device_free_chan_resources(dma_chan);
 out:
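With DMA_COMPL_SKIP_SRC_UNMAP/DMA_COMPL_SKIP_DEST_UNMAP the self-test now owns its mappings, so every exit after the map calls goes through the new unmap_dma: label. The staged-unwind shape, as a generic sketch (run_transfer is a hypothetical stand-in for prep/submit/wait):

	#include <linux/dma-mapping.h>

	static int do_test(struct device *dev, void *src, void *dst, size_t len,
			   int (*run_transfer)(dma_addr_t, dma_addr_t, size_t))
	{
		dma_addr_t s, d;
		int err = 0;

		s = dma_map_single(dev, src, len, DMA_TO_DEVICE);
		d = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);

		if (run_transfer(s, d, len) < 0) {
			err = -ENODEV;
			goto unmap;
		}
		/* success falls through: the test is done with the
		 * mappings either way */
	unmap:
		dma_unmap_single(dev, s, len, DMA_TO_DEVICE);
		dma_unmap_single(dev, d, len, DMA_FROM_DEVICE);
		return err;
	}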
drivers/dma/ioat/dma.h
@@ -97,6 +97,7 @@ struct ioat_chan_common {
 	#define IOAT_KOBJ_INIT_FAIL 3
 	#define IOAT_RESHAPE_PENDING 4
 	#define IOAT_RUN 5
+	#define IOAT_CHAN_ACTIVE 6
 	struct timer_list timer;
 	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
 	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
drivers/dma/ioat/dma_v2.c
@@ -269,61 +269,22 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
 	__ioat2_restart_chan(ioat);
 }
 
-void ioat2_timer_event(unsigned long data)
+static void check_active(struct ioat2_dma_chan *ioat)
 {
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 	struct ioat_chan_common *chan = &ioat->base;
 
-	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		dma_addr_t phys_complete;
-		u64 status;
-
-		status = ioat_chansts(chan);
-
-		/* when halted due to errors check for channel
-		 * programming errors before advancing the completion state
-		 */
-		if (is_ioat_halted(status)) {
-			u32 chanerr;
-
-			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-			dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
-				__func__, chanerr);
-			if (test_bit(IOAT_RUN, &chan->state))
-				BUG_ON(is_ioat_bug(chanerr));
-			else /* we never got off the ground */
-				return;
-		}
-
-		/* if we haven't made progress and we have already
-		 * acknowledged a pending completion once, then be more
-		 * forceful with a restart
-		 */
-		spin_lock_bh(&chan->cleanup_lock);
-		if (ioat_cleanup_preamble(chan, &phys_complete)) {
-			__cleanup(ioat, phys_complete);
-		} else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
-			spin_lock_bh(&ioat->prep_lock);
-			ioat2_restart_channel(ioat);
-			spin_unlock_bh(&ioat->prep_lock);
-		} else {
-			set_bit(IOAT_COMPLETION_ACK, &chan->state);
-			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		}
-		spin_unlock_bh(&chan->cleanup_lock);
-	} else {
-		u16 active;
+	if (ioat2_ring_active(ioat)) {
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+		return;
+	}
 
+	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
+		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+	else if (ioat->alloc_order > ioat_get_alloc_order()) {
 		/* if the ring is idle, empty, and oversized try to step
 		 * down the size
 		 */
-		spin_lock_bh(&chan->cleanup_lock);
-		spin_lock_bh(&ioat->prep_lock);
-		active = ioat2_ring_active(ioat);
-		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
-			reshape_ring(ioat, ioat->alloc_order-1);
-		spin_unlock_bh(&ioat->prep_lock);
-		spin_unlock_bh(&chan->cleanup_lock);
+		reshape_ring(ioat, ioat->alloc_order - 1);
 
 		/* keep shrinking until we get back to our minimum
 		 * default size
@@ -331,6 +292,60 @@ void ioat2_timer_event(unsigned long data)
 		if (ioat->alloc_order > ioat_get_alloc_order())
 			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
 	}
+
+}
+
+void ioat2_timer_event(unsigned long data)
+{
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+	struct ioat_chan_common *chan = &ioat->base;
+	dma_addr_t phys_complete;
+	u64 status;
+
+	status = ioat_chansts(chan);
+
+	/* when halted due to errors check for channel
+	 * programming errors before advancing the completion state
+	 */
+	if (is_ioat_halted(status)) {
+		u32 chanerr;
+
+		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+			__func__, chanerr);
+		if (test_bit(IOAT_RUN, &chan->state))
+			BUG_ON(is_ioat_bug(chanerr));
+		else /* we never got off the ground */
+			return;
+	}
+
+	/* if we haven't made progress and we have already
+	 * acknowledged a pending completion once, then be more
+	 * forceful with a restart
+	 */
+	spin_lock_bh(&chan->cleanup_lock);
+	if (ioat_cleanup_preamble(chan, &phys_complete))
+		__cleanup(ioat, phys_complete);
+	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+		spin_lock_bh(&ioat->prep_lock);
+		ioat2_restart_channel(ioat);
+		spin_unlock_bh(&ioat->prep_lock);
+		spin_unlock_bh(&chan->cleanup_lock);
+		return;
+	} else {
+		set_bit(IOAT_COMPLETION_ACK, &chan->state);
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+	}
+
+	if (ioat2_ring_active(ioat))
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+	else {
+		spin_lock_bh(&ioat->prep_lock);
+		check_active(ioat);
+		spin_unlock_bh(&ioat->prep_lock);
+	}
+	spin_unlock_bh(&chan->cleanup_lock);
 }
 
 static int ioat2_reset_hw(struct ioat_chan_common *chan)
@@ -404,7 +419,7 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
 	cookie = dma_cookie_assign(tx);
 	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
 
-	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
+	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state))
 		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
 
 	/* make descriptor updates visible before advancing ioat->head,
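The ioat2/ioat3 timer rework retires the overloaded IOAT_COMPLETION_PENDING state: the submit path arms the timer only on the idle-to-active edge, and check_active() detects the active-to-idle edge before considering a ring shrink. The edge-detect idiom in isolation (bit number and names are illustrative):

	#include <linux/bitops.h>
	#include <linux/timer.h>

	#define MY_CHAN_ACTIVE	6

	/* Submit: only the first descriptor after idle (re)arms the timer. */
	static void my_on_submit(unsigned long *state, struct timer_list *t,
				 unsigned long timeout)
	{
		if (!test_and_set_bit(MY_CHAN_ACTIVE, state))
			mod_timer(t, jiffies + timeout);
	}

	/* Timer: a set bit means descriptors completed since the last tick. */
	static bool my_went_idle(unsigned long *state)
	{
		return test_and_clear_bit(MY_CHAN_ACTIVE, state);
	}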
drivers/dma/ioat/dma_v3.c
@@ -342,61 +342,22 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 	__ioat2_restart_chan(ioat);
 }
 
-static void ioat3_timer_event(unsigned long data)
+static void check_active(struct ioat2_dma_chan *ioat)
 {
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 	struct ioat_chan_common *chan = &ioat->base;
 
-	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		dma_addr_t phys_complete;
-		u64 status;
-
-		status = ioat_chansts(chan);
-
-		/* when halted due to errors check for channel
-		 * programming errors before advancing the completion state
-		 */
-		if (is_ioat_halted(status)) {
-			u32 chanerr;
-
-			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-			dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
-				__func__, chanerr);
-			if (test_bit(IOAT_RUN, &chan->state))
-				BUG_ON(is_ioat_bug(chanerr));
-			else /* we never got off the ground */
-				return;
-		}
-
-		/* if we haven't made progress and we have already
-		 * acknowledged a pending completion once, then be more
-		 * forceful with a restart
-		 */
-		spin_lock_bh(&chan->cleanup_lock);
-		if (ioat_cleanup_preamble(chan, &phys_complete))
-			__cleanup(ioat, phys_complete);
-		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
-			spin_lock_bh(&ioat->prep_lock);
-			ioat3_restart_channel(ioat);
-			spin_unlock_bh(&ioat->prep_lock);
-		} else {
-			set_bit(IOAT_COMPLETION_ACK, &chan->state);
-			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		}
-		spin_unlock_bh(&chan->cleanup_lock);
-	} else {
-		u16 active;
+	if (ioat2_ring_active(ioat)) {
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+		return;
+	}
 
+	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
+		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+	else if (ioat->alloc_order > ioat_get_alloc_order()) {
 		/* if the ring is idle, empty, and oversized try to step
 		 * down the size
 		 */
-		spin_lock_bh(&chan->cleanup_lock);
-		spin_lock_bh(&ioat->prep_lock);
-		active = ioat2_ring_active(ioat);
-		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
-			reshape_ring(ioat, ioat->alloc_order-1);
-		spin_unlock_bh(&ioat->prep_lock);
-		spin_unlock_bh(&chan->cleanup_lock);
+		reshape_ring(ioat, ioat->alloc_order - 1);
 
 		/* keep shrinking until we get back to our minimum
 		 * default size
@@ -404,6 +365,60 @@ static void ioat3_timer_event(unsigned long data)
 		if (ioat->alloc_order > ioat_get_alloc_order())
 			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
 	}
+
+}
+
+static void ioat3_timer_event(unsigned long data)
+{
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+	struct ioat_chan_common *chan = &ioat->base;
+	dma_addr_t phys_complete;
+	u64 status;
+
+	status = ioat_chansts(chan);
+
+	/* when halted due to errors check for channel
+	 * programming errors before advancing the completion state
+	 */
+	if (is_ioat_halted(status)) {
+		u32 chanerr;
+
+		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+			__func__, chanerr);
+		if (test_bit(IOAT_RUN, &chan->state))
+			BUG_ON(is_ioat_bug(chanerr));
+		else /* we never got off the ground */
+			return;
+	}
+
+	/* if we haven't made progress and we have already
+	 * acknowledged a pending completion once, then be more
+	 * forceful with a restart
+	 */
+	spin_lock_bh(&chan->cleanup_lock);
+	if (ioat_cleanup_preamble(chan, &phys_complete))
+		__cleanup(ioat, phys_complete);
+	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+		spin_lock_bh(&ioat->prep_lock);
+		ioat3_restart_channel(ioat);
+		spin_unlock_bh(&ioat->prep_lock);
+		spin_unlock_bh(&chan->cleanup_lock);
+		return;
+	} else {
+		set_bit(IOAT_COMPLETION_ACK, &chan->state);
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+	}
+
+	if (ioat2_ring_active(ioat))
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+	else {
+		spin_lock_bh(&ioat->prep_lock);
+		check_active(ioat);
+		spin_unlock_bh(&ioat->prep_lock);
+	}
+	spin_unlock_bh(&chan->cleanup_lock);
 }
 
 static enum dma_status
@@ -863,6 +878,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	unsigned long tmo;
 	struct device *dev = &device->pdev->dev;
 	struct dma_device *dma = &device->common;
+	u8 op = 0;
 
 	dev_dbg(dev, "%s\n", __func__);
 
@@ -908,18 +924,22 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	}
 
 	/* test xor */
+	op = IOAT_OP_XOR;
+
 	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
 		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
 	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
 				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
-				      DMA_PREP_INTERRUPT);
+				      DMA_PREP_INTERRUPT |
+				      DMA_COMPL_SKIP_SRC_UNMAP |
+				      DMA_COMPL_SKIP_DEST_UNMAP);
 
 	if (!tx) {
 		dev_err(dev, "Self-test xor prep failed\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
 	async_tx_ack(tx);
@@ -930,7 +950,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (cookie < 0) {
 		dev_err(dev, "Self-test xor setup failed\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 	dma->device_issue_pending(dma_chan);
 
@@ -939,9 +959,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
 		dev_err(dev, "Self-test xor timed out\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
+	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
 	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
 	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
 		u32 *ptr = page_address(dest);
@@ -957,6 +981,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
 		goto free_resources;
 
+	op = IOAT_OP_XOR_VAL;
+
 	/* validate the sources with the destintation page */
 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
 		xor_val_srcs[i] = xor_srcs[i];
@@ -969,11 +995,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 				   DMA_TO_DEVICE);
 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
-					  &xor_val_result, DMA_PREP_INTERRUPT);
+					  &xor_val_result, DMA_PREP_INTERRUPT |
+					  DMA_COMPL_SKIP_SRC_UNMAP |
+					  DMA_COMPL_SKIP_DEST_UNMAP);
 	if (!tx) {
 		dev_err(dev, "Self-test zero prep failed\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
 	async_tx_ack(tx);
@@ -984,7 +1012,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (cookie < 0) {
 		dev_err(dev, "Self-test zero setup failed\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 	dma->device_issue_pending(dma_chan);
 
@@ -993,9 +1021,12 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
 		dev_err(dev, "Self-test validate timed out\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
 	if (xor_val_result != 0) {
 		dev_err(dev, "Self-test validate failed compare\n");
 		err = -ENODEV;
@@ -1007,14 +1038,18 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	goto free_resources;
 
 	/* test memset */
+	op = IOAT_OP_FILL;
+
 	dma_addr = dma_map_page(dev, dest, 0,
 			PAGE_SIZE, DMA_FROM_DEVICE);
 	tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
-					 DMA_PREP_INTERRUPT);
+					 DMA_PREP_INTERRUPT |
+					 DMA_COMPL_SKIP_SRC_UNMAP |
+					 DMA_COMPL_SKIP_DEST_UNMAP);
 	if (!tx) {
 		dev_err(dev, "Self-test memset prep failed\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
 	async_tx_ack(tx);
@@ -1025,7 +1060,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (cookie < 0) {
 		dev_err(dev, "Self-test memset setup failed\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 	dma->device_issue_pending(dma_chan);
 
@@ -1034,9 +1069,11 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
 		dev_err(dev, "Self-test memset timed out\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
+	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
 	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
 		u32 *ptr = page_address(dest);
 		if (ptr[i]) {
@@ -1047,17 +1084,21 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	}
 
 	/* test for non-zero parity sum */
+	op = IOAT_OP_XOR_VAL;
+
 	xor_val_result = 0;
 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
 		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
-					  &xor_val_result, DMA_PREP_INTERRUPT);
+					  &xor_val_result, DMA_PREP_INTERRUPT |
+					  DMA_COMPL_SKIP_SRC_UNMAP |
+					  DMA_COMPL_SKIP_DEST_UNMAP);
 	if (!tx) {
 		dev_err(dev, "Self-test 2nd zero prep failed\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
 	async_tx_ack(tx);
@@ -1068,7 +1109,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (cookie < 0) {
 		dev_err(dev, "Self-test 2nd zero setup failed\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 	dma->device_issue_pending(dma_chan);
 
@@ -1077,15 +1118,31 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
 		dev_err(dev, "Self-test 2nd validate timed out\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
 	if (xor_val_result != SUM_CHECK_P_RESULT) {
 		dev_err(dev, "Self-test validate failed compare\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
+	goto free_resources;
+dma_unmap:
+	if (op == IOAT_OP_XOR) {
+		dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+				       DMA_TO_DEVICE);
+	} else if (op == IOAT_OP_XOR_VAL) {
+		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
+	} else if (op == IOAT_OP_FILL)
+		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
 free_resources:
 	dma->device_free_chan_resources(dma_chan);
 out:
@@ -1126,12 +1183,7 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
 	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
 	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
 
-	/* -= IOAT ver.3 workarounds =- */
-	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
-	 * that can cause stability issues for IOAT ver.3, and clear any
-	 * pending errors
-	 */
-	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+	/* clear any pending errors */
 	err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
 	if (err) {
 		dev_err(&pdev->dev, "channel error register unreachable\n");
@@ -1187,6 +1239,26 @@ static bool is_snb_ioat(struct pci_dev *pdev)
 	}
 }
 
+static bool is_ivb_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
+		return true;
+	default:
+		return false;
+	}
+
+}
+
 int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
 	struct pci_dev *pdev = device->pdev;
@@ -1207,7 +1279,7 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
 
-	if (is_jf_ioat(pdev) || is_snb_ioat(pdev))
+	if (is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev))
 		dma->copy_align = 6;
 
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
|
||||||
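The new dma_unmap label works hand in hand with the DMA_COMPL_SKIP_SRC_UNMAP / DMA_COMPL_SKIP_DEST_UNMAP flags: the self-test now tells the completion path not to unmap the test pages and unmaps them itself on both the success and failure paths. A minimal sketch of that calling pattern, with dev, chan, dma_srcs, nsrcs and xor_val_result standing in as hypothetical locals:

	/* sketch: ask the engine to skip the automatic unmap, then
	 * unmap manually once the operation has completed */
	unsigned long flags = DMA_PREP_INTERRUPT |
			      DMA_COMPL_SKIP_SRC_UNMAP |
			      DMA_COMPL_SKIP_DEST_UNMAP;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	int i;

	tx = chan->device->device_prep_dma_xor_val(chan, dma_srcs, nsrcs,
						   PAGE_SIZE, &xor_val_result,
						   flags);
	if (tx) {
		cookie = tx->tx_submit(tx);
		chan->device->device_issue_pending(chan);
	}
	/* ... wait for completion, then: */
	for (i = 0; i < nsrcs; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);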
@@ -35,6 +35,17 @@
 #define IOAT_VER_3_0            0x30    /* Version 3.0 */
 #define IOAT_VER_3_2            0x32    /* Version 3.2 */
 
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB0	0x0e20
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB1	0x0e21
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB2	0x0e22
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB3	0x0e23
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB4	0x0e24
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB5	0x0e25
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB6	0x0e26
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB7	0x0e27
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB8	0x0e2e
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB9	0x0e2f
+
 int system_has_dca_enabled(struct pci_dev *pdev);
 
 struct ioat_dma_descriptor {
@@ -40,17 +40,6 @@ MODULE_VERSION(IOAT_DMA_VERSION);
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Intel Corporation");
 
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB0	0x0e20
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB1	0x0e21
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB2	0x0e22
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB3	0x0e23
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB4	0x0e24
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB5	0x0e25
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB6	0x0e26
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB7	0x0e27
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB8	0x0e2e
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB9	0x0e2f
-
 static struct pci_device_id ioat_pci_tbl[] = {
 	/* I/OAT v1 platforms */
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
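The Ivy Bridge IDs move out of pci.c into the shared header so that dma_v3.c can key is_ivb_ioat() off them; ioat_pci_tbl in pci.c keeps matching the same devices. The table entries themselves are outside this hunk, but they would follow the existing PCI_VDEVICE pattern, roughly:

	/* sketch of the ioat_pci_tbl entries for the new IDs */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
	/* ... and so on through PCI_DEVICE_ID_INTEL_IOAT_IVB9 */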
@@ -936,7 +936,7 @@ static irqreturn_t iop_adma_err_handler(int irq, void *data)
 	struct iop_adma_chan *chan = data;
 	unsigned long status = iop_chan_get_status(chan);
 
-	dev_printk(KERN_ERR, chan->device->common.dev,
+	dev_err(chan->device->common.dev,
 		"error ( %s%s%s%s%s%s%s)\n",
 		iop_is_err_int_parity(status, chan) ? "int_parity " : "",
 		iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
@@ -1017,7 +1017,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
 
 	if (iop_adma_status(dma_chan, cookie, NULL) !=
 			DMA_SUCCESS) {
-		dev_printk(KERN_ERR, dma_chan->device->dev,
+		dev_err(dma_chan->device->dev,
 			"Self-test copy timed out, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
@@ -1027,7 +1027,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
 	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
 		IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
 	if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
-		dev_printk(KERN_ERR, dma_chan->device->dev,
+		dev_err(dma_chan->device->dev,
 			"Self-test copy failed compare, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
@@ -1117,7 +1117,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
 
 	if (iop_adma_status(dma_chan, cookie, NULL) !=
 		DMA_SUCCESS) {
-		dev_printk(KERN_ERR, dma_chan->device->dev,
+		dev_err(dma_chan->device->dev,
 			"Self-test xor timed out, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
@@ -1129,7 +1129,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
 	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
 		u32 *ptr = page_address(dest);
 		if (ptr[i] != cmp_word) {
-			dev_printk(KERN_ERR, dma_chan->device->dev,
+			dev_err(dma_chan->device->dev,
 				"Self-test xor failed compare, disabling\n");
 			err = -ENODEV;
 			goto free_resources;
@@ -1163,14 +1163,14 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
 	msleep(8);
 
 	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
-		dev_printk(KERN_ERR, dma_chan->device->dev,
+		dev_err(dma_chan->device->dev,
 			"Self-test zero sum timed out, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}
 
 	if (zero_sum_result != 0) {
-		dev_printk(KERN_ERR, dma_chan->device->dev,
+		dev_err(dma_chan->device->dev,
 			"Self-test zero sum failed compare, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
@@ -1187,7 +1187,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
 	msleep(8);
 
 	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
-		dev_printk(KERN_ERR, dma_chan->device->dev,
+		dev_err(dma_chan->device->dev,
 			"Self-test memset timed out, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
@@ -1196,7 +1196,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
 	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
 		u32 *ptr = page_address(dest);
 		if (ptr[i]) {
-			dev_printk(KERN_ERR, dma_chan->device->dev,
+			dev_err(dma_chan->device->dev,
 				"Self-test memset failed compare, disabling\n");
 			err = -ENODEV;
 			goto free_resources;
@@ -1219,14 +1219,14 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
 	msleep(8);
 
 	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
-		dev_printk(KERN_ERR, dma_chan->device->dev,
+		dev_err(dma_chan->device->dev,
 			"Self-test non-zero sum timed out, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}
 
 	if (zero_sum_result != 1) {
-		dev_printk(KERN_ERR, dma_chan->device->dev,
+		dev_err(dma_chan->device->dev,
 			"Self-test non-zero sum failed compare, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
@@ -1579,15 +1579,14 @@ static int iop_adma_probe(struct platform_device *pdev)
 		goto err_free_iop_chan;
 	}
 
-	dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
-		  "( %s%s%s%s%s%s%s)\n",
-	  dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
-	  dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
-	  dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
-	  dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
-	  dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
-	  dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
-	  dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
+	dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s%s)\n",
+		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
+		 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
+		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
+		 dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
+		 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
+		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
 
 	dma_async_device_register(dma_dev);
 	goto out;
@@ -1651,8 +1650,8 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
 		/* run the descriptor */
 		iop_chan_enable(iop_chan);
 	} else
-		dev_printk(KERN_ERR, iop_chan->device->common.dev,
+		dev_err(iop_chan->device->common.dev,
 			"failed to allocate null descriptor\n");
 	spin_unlock_bh(&iop_chan->lock);
 }
@@ -1704,7 +1703,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
 		/* run the descriptor */
 		iop_chan_enable(iop_chan);
 	} else
-		dev_printk(KERN_ERR, iop_chan->device->common.dev,
+		dev_err(iop_chan->device->common.dev,
 			"failed to allocate null descriptor\n");
 	spin_unlock_bh(&iop_chan->lock);
 }
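All of the iop-adma changes above are the same mechanical conversion: dev_err() is shorthand for dev_printk() at KERN_ERR level, so each call collapses to the shorter form with identical output, for example:

	/* before */
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "failed to allocate null descriptor\n");
	/* after: same log line, one less argument */
	dev_err(chan->device->common.dev,
		"failed to allocate null descriptor\n");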
@@ -1347,7 +1347,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
 	    chan->chan_id != IDMAC_IC_7)
 		return NULL;
 
-	if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) {
+	if (!is_slave_direction(direction)) {
 		dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
 		return NULL;
 	}
@@ -44,7 +44,6 @@ static void ipu_write_reg(struct ipu *ipu, u32 value, unsigned long reg)
 struct ipu_irq_bank {
 	unsigned int	control;
 	unsigned int	status;
-	spinlock_t	lock;
 	struct ipu	*ipu;
 };
 
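is_slave_direction() is the dmaengine.h helper that replaces the open-coded direction test; at this point in the series it reads roughly:

	/* from include/linux/dmaengine.h (3.9-era): true only for the
	 * two slave transfer directions */
	static inline bool is_slave_direction(enum dma_transfer_direction direction)
	{
		return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
	}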
@@ -618,10 +618,8 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
 		else if (maxburst == 32)
 			chan->dcmd |= DCMD_BURST32;
 
-		if (cfg) {
-			chan->dir = cfg->direction;
-			chan->drcmr = cfg->slave_id;
-		}
+		chan->dir = cfg->direction;
+		chan->drcmr = cfg->slave_id;
 		chan->dev_addr = addr;
 		break;
 	default:
@@ -210,7 +210,7 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 		break;
 	default:
 		dev_err(mv_chan_to_devp(chan),
-			"error: unsupported operation %d.\n",
+			"error: unsupported operation %d\n",
 			type);
 		BUG();
 		return;
@@ -828,28 +828,22 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
 	u32 val;
 
 	val = __raw_readl(XOR_CONFIG(chan));
-	dev_err(mv_chan_to_devp(chan),
-		"config 0x%08x.\n", val);
+	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);
 
 	val = __raw_readl(XOR_ACTIVATION(chan));
-	dev_err(mv_chan_to_devp(chan),
-		"activation 0x%08x.\n", val);
+	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);
 
 	val = __raw_readl(XOR_INTR_CAUSE(chan));
-	dev_err(mv_chan_to_devp(chan),
-		"intr cause 0x%08x.\n", val);
+	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);
 
 	val = __raw_readl(XOR_INTR_MASK(chan));
-	dev_err(mv_chan_to_devp(chan),
-		"intr mask 0x%08x.\n", val);
+	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);
 
 	val = __raw_readl(XOR_ERROR_CAUSE(chan));
-	dev_err(mv_chan_to_devp(chan),
-		"error cause 0x%08x.\n", val);
+	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);
 
 	val = __raw_readl(XOR_ERROR_ADDR(chan));
-	dev_err(mv_chan_to_devp(chan),
-		"error addr 0x%08x.\n", val);
+	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
 }
 
 static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
@@ -862,7 +856,7 @@ static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
 	}
 
 	dev_err(mv_chan_to_devp(chan),
-		"error on chan %d. intr cause 0x%08x.\n",
+		"error on chan %d. intr cause 0x%08x\n",
 		chan->idx, intr_cause);
 
 	mv_dump_xor_regs(chan);
@@ -1052,9 +1046,8 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 		u32 *ptr = page_address(dest);
 		if (ptr[i] != cmp_word) {
 			dev_err(dma_chan->device->dev,
-				"Self-test xor failed compare, disabling."
-				" index %d, data %x, expected %x\n", i,
-				ptr[i], cmp_word);
+				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
+				i, ptr[i], cmp_word);
 			err = -ENODEV;
 			goto free_resources;
 		}
@@ -1194,12 +1187,11 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 		goto err_free_irq;
 	}
 
-	dev_info(&pdev->dev, "Marvell XOR: "
-		 "( %s%s%s%s)\n",
+	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
 
 	dma_async_device_register(dma_dev);
 	return mv_chan;
@@ -1253,7 +1245,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 	struct resource *res;
 	int i, ret;
 
-	dev_notice(&pdev->dev, "Marvell XOR driver\n");
+	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
 
 	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
 	if (!xordev)
@@ -109,7 +109,7 @@ struct mxs_dma_chan {
 	struct dma_chan			chan;
 	struct dma_async_tx_descriptor	desc;
 	struct tasklet_struct		tasklet;
-	int				chan_irq;
+	unsigned int			chan_irq;
 	struct mxs_dma_ccw		*ccw;
 	dma_addr_t			ccw_phys;
 	int				desc_count;
@@ -441,7 +441,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	struct mxs_dma_ccw *ccw;
 	struct scatterlist *sg;
-	int i, j;
+	u32 i, j;
 	u32 *pio;
 	bool append = flags & DMA_PREP_INTERRUPT;
 	int idx = append ? mxs_chan->desc_count : 0;
@@ -537,8 +537,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
 {
 	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-	int num_periods = buf_len / period_len;
-	int i = 0, buf = 0;
+	u32 num_periods = buf_len / period_len;
+	u32 i = 0, buf = 0;
 
 	if (mxs_chan->status == DMA_IN_PROGRESS)
 		return NULL;
drivers/dma/of-dma.c (new file, 267 lines)
@@ -0,0 +1,267 @@
+/*
+ * Device tree helpers for DMA request / controller
+ *
+ * Based on of_gpio.c
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+
+static LIST_HEAD(of_dma_list);
+static DEFINE_SPINLOCK(of_dma_lock);
+
+/**
+ * of_dma_get_controller - Get a DMA controller in DT DMA helpers list
+ * @dma_spec:	pointer to DMA specifier as found in the device tree
+ *
+ * Finds a DMA controller with matching device node and number for dma cells
+ * in a list of registered DMA controllers. If a match is found the use_count
+ * variable is increased and a valid pointer to the DMA data stored is retuned.
+ * A NULL pointer is returned if no match is found.
+ */
+static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec)
+{
+	struct of_dma *ofdma;
+
+	spin_lock(&of_dma_lock);
+
+	if (list_empty(&of_dma_list)) {
+		spin_unlock(&of_dma_lock);
+		return NULL;
+	}
+
+	list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
+		if ((ofdma->of_node == dma_spec->np) &&
+		    (ofdma->of_dma_nbcells == dma_spec->args_count)) {
+			ofdma->use_count++;
+			spin_unlock(&of_dma_lock);
+			return ofdma;
+		}
+
+	spin_unlock(&of_dma_lock);
+
+	pr_debug("%s: can't find DMA controller %s\n", __func__,
+		 dma_spec->np->full_name);
+
+	return NULL;
+}
+
+/**
+ * of_dma_put_controller - Decrement use count for a registered DMA controller
+ * @of_dma:	pointer to DMA controller data
+ *
+ * Decrements the use_count variable in the DMA data structure. This function
+ * should be called only when a valid pointer is returned from
+ * of_dma_get_controller() and no further accesses to data referenced by that
+ * pointer are needed.
+ */
+static void of_dma_put_controller(struct of_dma *ofdma)
+{
+	spin_lock(&of_dma_lock);
+	ofdma->use_count--;
+	spin_unlock(&of_dma_lock);
+}
+
+/**
+ * of_dma_controller_register - Register a DMA controller to DT DMA helpers
+ * @np:			device node of DMA controller
+ * @of_dma_xlate:	translation function which converts a phandle
+ *			arguments list into a dma_chan structure
+ * @data		pointer to controller specific data to be used by
+ *			translation function
+ *
+ * Returns 0 on success or appropriate errno value on error.
+ *
+ * Allocated memory should be freed with appropriate of_dma_controller_free()
+ * call.
+ */
+int of_dma_controller_register(struct device_node *np,
+				struct dma_chan *(*of_dma_xlate)
+				(struct of_phandle_args *, struct of_dma *),
+				void *data)
+{
+	struct of_dma	*ofdma;
+	int		nbcells;
+
+	if (!np || !of_dma_xlate) {
+		pr_err("%s: not enough information provided\n", __func__);
+		return -EINVAL;
+	}
+
+	ofdma = kzalloc(sizeof(*ofdma), GFP_KERNEL);
+	if (!ofdma)
+		return -ENOMEM;
+
+	nbcells = be32_to_cpup(of_get_property(np, "#dma-cells", NULL));
+	if (!nbcells) {
+		pr_err("%s: #dma-cells property is missing or invalid\n",
+		       __func__);
+		kfree(ofdma);
+		return -EINVAL;
+	}
+
+	ofdma->of_node = np;
+	ofdma->of_dma_nbcells = nbcells;
+	ofdma->of_dma_xlate = of_dma_xlate;
+	ofdma->of_dma_data = data;
+	ofdma->use_count = 0;
+
+	/* Now queue of_dma controller structure in list */
+	spin_lock(&of_dma_lock);
+	list_add_tail(&ofdma->of_dma_controllers, &of_dma_list);
+	spin_unlock(&of_dma_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_dma_controller_register);
+
+/**
+ * of_dma_controller_free - Remove a DMA controller from DT DMA helpers list
+ * @np:		device node of DMA controller
+ *
+ * Memory allocated by of_dma_controller_register() is freed here.
+ */
+int of_dma_controller_free(struct device_node *np)
+{
+	struct of_dma *ofdma;
+
+	spin_lock(&of_dma_lock);
+
+	if (list_empty(&of_dma_list)) {
+		spin_unlock(&of_dma_lock);
+		return -ENODEV;
+	}
+
+	list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
+		if (ofdma->of_node == np) {
+			if (ofdma->use_count) {
+				spin_unlock(&of_dma_lock);
+				return -EBUSY;
+			}
+
+			list_del(&ofdma->of_dma_controllers);
+			spin_unlock(&of_dma_lock);
+			kfree(ofdma);
+			return 0;
+		}
+
+	spin_unlock(&of_dma_lock);
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(of_dma_controller_free);
+
+/**
+ * of_dma_match_channel - Check if a DMA specifier matches name
+ * @np:		device node to look for DMA channels
+ * @name:	channel name to be matched
+ * @index:	index of DMA specifier in list of DMA specifiers
+ * @dma_spec:	pointer to DMA specifier as found in the device tree
+ *
+ * Check if the DMA specifier pointed to by the index in a list of DMA
+ * specifiers, matches the name provided. Returns 0 if the name matches and
+ * a valid pointer to the DMA specifier is found. Otherwise returns -ENODEV.
+ */
+static int of_dma_match_channel(struct device_node *np, char *name, int index,
+				struct of_phandle_args *dma_spec)
+{
+	const char *s;
+
+	if (of_property_read_string_index(np, "dma-names", index, &s))
+		return -ENODEV;
+
+	if (strcmp(name, s))
+		return -ENODEV;
+
+	if (of_parse_phandle_with_args(np, "dmas", "#dma-cells", index,
+				       dma_spec))
+		return -ENODEV;
+
+	return 0;
+}
+
+/**
+ * of_dma_request_slave_channel - Get the DMA slave channel
+ * @np:		device node to get DMA request from
+ * @name:	name of desired channel
+ *
+ * Returns pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
+					      char *name)
+{
+	struct of_phandle_args	dma_spec;
+	struct of_dma		*ofdma;
+	struct dma_chan		*chan;
+	int			count, i;
+
+	if (!np || !name) {
+		pr_err("%s: not enough information provided\n", __func__);
+		return NULL;
+	}
+
+	count = of_property_count_strings(np, "dma-names");
+	if (count < 0) {
+		pr_err("%s: dma-names property missing or empty\n", __func__);
+		return NULL;
+	}
+
+	for (i = 0; i < count; i++) {
+		if (of_dma_match_channel(np, name, i, &dma_spec))
+			continue;
+
+		ofdma = of_dma_get_controller(&dma_spec);
+
+		if (!ofdma)
+			continue;
+
+		chan = ofdma->of_dma_xlate(&dma_spec, ofdma);
+
+		of_dma_put_controller(ofdma);
+
+		of_node_put(dma_spec.np);
+
+		if (chan)
+			return chan;
+	}
+
+	return NULL;
+}
+
+/**
+ * of_dma_simple_xlate - Simple DMA engine translation function
+ * @dma_spec:	pointer to DMA specifier as found in the device tree
+ * @of_dma:	pointer to DMA controller data
+ *
+ * A simple translation function for devices that use a 32-bit value for the
+ * filter_param when calling the DMA engine dma_request_channel() function.
+ * Note that this translation function requires that #dma-cells is equal to 1
+ * and the argument of the dma specifier is the 32-bit filter_param. Returns
+ * pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+						struct of_dma *ofdma)
+{
+	int count = dma_spec->args_count;
+	struct of_dma_filter_info *info = ofdma->of_dma_data;
+
+	if (!info || !info->filter_fn)
+		return NULL;
+
+	if (count != 1)
+		return NULL;
+
+	return dma_request_channel(info->dma_cap, info->filter_fn,
+			&dma_spec->args[0]);
+}
+EXPORT_SYMBOL_GPL(of_dma_simple_xlate);
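Taken together, the new helpers split the work between controller and client: a controller driver registers a translation callback at probe time, and clients resolve their dmas/dma-names properties by name. A sketch using only the functions added above (error handling trimmed; np, client_np and info are hypothetical locals):

	/* controller side (probe): register a translation function.
	 * For simple one-cell bindings of_dma_simple_xlate() is enough;
	 * the driver fills info.dma_cap and info.filter_fn first. */
	static struct of_dma_filter_info info;

	ret = of_dma_controller_register(np, of_dma_simple_xlate, &info);
	if (ret)
		return ret;

	/* client side: resolve the entry named "rx" in this node's
	 * dmas/dma-names properties */
	chan = of_dma_request_slave_channel(client_np, "rx");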
@@ -1029,18 +1029,7 @@ static struct pci_driver pch_dma_driver = {
 #endif
 };
 
-static int __init pch_dma_init(void)
-{
-	return pci_register_driver(&pch_dma_driver);
-}
-
-static void __exit pch_dma_exit(void)
-{
-	pci_unregister_driver(&pch_dma_driver);
-}
-
-module_init(pch_dma_init);
-module_exit(pch_dma_exit);
+module_pci_driver(pch_dma_driver);
 
 MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
 		   "DMA controller driver");
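module_pci_driver() expands to essentially the init/exit pair deleted above, so the conversion is behavior-preserving; from <linux/pci.h> the macro generates roughly:

	static int __init pch_dma_driver_init(void)
	{
		return pci_register_driver(&pch_dma_driver);
	}
	module_init(pch_dma_driver_init);

	static void __exit pch_dma_driver_exit(void)
	{
		pci_unregister_driver(&pch_dma_driver);
	}
	module_exit(pch_dma_driver_exit);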
@@ -25,6 +25,7 @@
 #include <linux/amba/pl330.h>
 #include <linux/scatterlist.h>
 #include <linux/of.h>
+#include <linux/of_dma.h>
 
 #include "dmaengine.h"
 #define PL330_MAX_CHAN		8
@@ -606,6 +607,11 @@ struct dma_pl330_desc {
 	struct dma_pl330_chan *pchan;
 };
 
+struct dma_pl330_filter_args {
+	struct dma_pl330_dmac *pdmac;
+	unsigned int chan_id;
+};
+
 static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
 {
 	if (r && r->xfer_cb)
@@ -2352,6 +2358,16 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
 	tasklet_schedule(&pch->task);
 }
 
+static bool pl330_dt_filter(struct dma_chan *chan, void *param)
+{
+	struct dma_pl330_filter_args *fargs = param;
+
+	if (chan->device != &fargs->pdmac->ddma)
+		return false;
+
+	return (chan->chan_id == fargs->chan_id);
+}
+
 bool pl330_filter(struct dma_chan *chan, void *param)
 {
 	u8 *peri_id;
@@ -2359,25 +2375,35 @@ bool pl330_filter(struct dma_chan *chan, void *param)
 	if (chan->device->dev->driver != &pl330_driver.drv)
 		return false;
 
-#ifdef CONFIG_OF
-	if (chan->device->dev->of_node) {
-		const __be32 *prop_value;
-		phandle phandle;
-		struct device_node *node;
-
-		prop_value = ((struct property *)param)->value;
-		phandle = be32_to_cpup(prop_value++);
-		node = of_find_node_by_phandle(phandle);
-		return ((chan->private == node) &&
-				(chan->chan_id == be32_to_cpup(prop_value)));
-	}
-#endif
-
 	peri_id = chan->private;
 	return *peri_id == (unsigned)param;
 }
 EXPORT_SYMBOL(pl330_filter);
 
+static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
+						struct of_dma *ofdma)
+{
+	int count = dma_spec->args_count;
+	struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
+	struct dma_pl330_filter_args fargs;
+	dma_cap_mask_t cap;
+
+	if (!pdmac)
+		return NULL;
+
+	if (count != 1)
+		return NULL;
+
+	fargs.pdmac = pdmac;
+	fargs.chan_id = dma_spec->args[0];
+
+	dma_cap_zero(cap);
+	dma_cap_set(DMA_SLAVE, cap);
+	dma_cap_set(DMA_CYCLIC, cap);
+
+	return dma_request_channel(cap, pl330_dt_filter, &fargs);
+}
+
 static int pl330_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
@@ -2866,7 +2892,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	pdat = adev->dev.platform_data;
 
 	/* Allocate a new DMAC and its Channels */
-	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
+	pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
 	if (!pdmac) {
 		dev_err(&adev->dev, "unable to allocate mem\n");
 		return -ENOMEM;
@@ -2878,13 +2904,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
 
 	res = &adev->res;
-	request_mem_region(res->start, resource_size(res), "dma-pl330");
-
-	pi->base = ioremap(res->start, resource_size(res));
-	if (!pi->base) {
-		ret = -ENXIO;
-		goto probe_err1;
-	}
+	pi->base = devm_request_and_ioremap(&adev->dev, res);
+	if (!pi->base)
+		return -ENXIO;
 
 	amba_set_drvdata(adev, pdmac);
 
@@ -2892,11 +2914,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	ret = request_irq(irq, pl330_irq_handler, 0,
 			dev_name(&adev->dev), pi);
 	if (ret)
-		goto probe_err2;
+		return ret;
 
 	ret = pl330_add(pi);
 	if (ret)
-		goto probe_err3;
+		goto probe_err1;
 
 	INIT_LIST_HEAD(&pdmac->desc_pool);
 	spin_lock_init(&pdmac->pool_lock);
@@ -2918,7 +2940,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	if (!pdmac->peripherals) {
 		ret = -ENOMEM;
 		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
-		goto probe_err4;
+		goto probe_err2;
 	}
 
 	for (i = 0; i < num_chan; i++) {
@@ -2962,7 +2984,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	ret = dma_async_device_register(pd);
 	if (ret) {
 		dev_err(&adev->dev, "unable to register DMAC\n");
-		goto probe_err4;
+		goto probe_err2;
 	}
 
 	dev_info(&adev->dev,
@@ -2973,17 +2995,20 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
 		pi->pcfg.num_peri, pi->pcfg.num_events);
 
+	ret = of_dma_controller_register(adev->dev.of_node,
+					 of_dma_pl330_xlate, pdmac);
+	if (ret) {
+		dev_err(&adev->dev,
+		"unable to register DMA to the generic DT DMA helpers\n");
+		goto probe_err2;
+	}
+
 	return 0;
 
-probe_err4:
-	pl330_del(pi);
-probe_err3:
-	free_irq(irq, pi);
 probe_err2:
-	iounmap(pi->base);
+	pl330_del(pi);
 probe_err1:
-	release_mem_region(res->start, resource_size(res));
-	kfree(pdmac);
+	free_irq(irq, pi);
 
 	return ret;
 }
@@ -2993,12 +3018,13 @@ static int pl330_remove(struct amba_device *adev)
 	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
 	struct dma_pl330_chan *pch, *_p;
 	struct pl330_info *pi;
-	struct resource *res;
 	int irq;
 
 	if (!pdmac)
 		return 0;
 
+	of_dma_controller_free(adev->dev.of_node);
+
 	amba_set_drvdata(adev, NULL);
 
 	/* Idle the DMAC */
@@ -3020,13 +3046,6 @@ static int pl330_remove(struct amba_device *adev)
 	irq = adev->irq[0];
 	free_irq(irq, pi);
 
-	iounmap(pi->base);
-
-	res = &adev->res;
-	release_mem_region(res->start, resource_size(res));
-
-	kfree(pdmac);
-
 	return 0;
 }
 
|
|||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (!chan)
|
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
switch (cmd) {
|
switch (cmd) {
|
||||||
case DMA_TERMINATE_ALL:
|
case DMA_TERMINATE_ALL:
|
||||||
spin_lock_irqsave(&schan->chan_lock, flags);
|
spin_lock_irqsave(&schan->chan_lock, flags);
|
||||||
|
@ -326,7 +326,7 @@ static int sh_dmae_set_slave(struct shdma_chan *schan,
|
|||||||
shdma_chan);
|
shdma_chan);
|
||||||
const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
|
const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
|
||||||
if (!cfg)
|
if (!cfg)
|
||||||
return -ENODEV;
|
return -ENXIO;
|
||||||
|
|
||||||
if (!try)
|
if (!try)
|
||||||
sh_chan->config = cfg;
|
sh_chan->config = cfg;
|
||||||
|
@@ -32,7 +32,9 @@
 #define SIRFSOC_DMA_CH_VALID			0x140
 #define SIRFSOC_DMA_CH_INT			0x144
 #define SIRFSOC_DMA_INT_EN			0x148
+#define SIRFSOC_DMA_INT_EN_CLR			0x14C
 #define SIRFSOC_DMA_CH_LOOP_CTRL		0x150
+#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR		0x15C
 
 #define SIRFSOC_DMA_MODE_CTRL_BIT		4
 #define SIRFSOC_DMA_DIR_CTRL_BIT		5
@@ -76,6 +78,7 @@ struct sirfsoc_dma {
 	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
 	void __iomem			*base;
 	int				irq;
+	bool				is_marco;
 };
 
 #define DRV_NAME	"sirfsoc_dma"
@@ -288,17 +291,67 @@ static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
 	int cid = schan->chan.chan_id;
 	unsigned long flags;
 
-	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
-		~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+	spin_lock_irqsave(&schan->lock, flags);
+
+	if (!sdma->is_marco) {
+		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
+			~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+			& ~((1 << cid) | 1 << (cid + 16)),
+			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+	} else {
+		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
+		writel_relaxed((1 << cid) | 1 << (cid + 16),
+			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
+	}
+
 	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
 
-	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
-		& ~((1 << cid) | 1 << (cid + 16)),
-		sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
-
-	spin_lock_irqsave(&schan->lock, flags);
 	list_splice_tail_init(&schan->active, &schan->free);
 	list_splice_tail_init(&schan->queued, &schan->free);
+
+	spin_unlock_irqrestore(&schan->lock, flags);
+
+	return 0;
+}
+
+static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
+{
+	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+	int cid = schan->chan.chan_id;
+	unsigned long flags;
+
+	spin_lock_irqsave(&schan->lock, flags);
+
+	if (!sdma->is_marco)
+		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+			& ~((1 << cid) | 1 << (cid + 16)),
+			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+	else
+		writel_relaxed((1 << cid) | 1 << (cid + 16),
+			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
+
+	spin_unlock_irqrestore(&schan->lock, flags);
+
+	return 0;
+}
+
+static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
+{
+	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+	int cid = schan->chan.chan_id;
+	unsigned long flags;
+
+	spin_lock_irqsave(&schan->lock, flags);
+
+	if (!sdma->is_marco)
+		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+			| ((1 << cid) | 1 << (cid + 16)),
+			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+	else
+		writel_relaxed((1 << cid) | 1 << (cid + 16),
+			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
 
 	spin_unlock_irqrestore(&schan->lock, flags);
 
 	return 0;
@@ -311,6 +364,10 @@ static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 
 	switch (cmd) {
+	case DMA_PAUSE:
+		return sirfsoc_dma_pause_chan(schan);
+	case DMA_RESUME:
+		return sirfsoc_dma_resume_chan(schan);
 	case DMA_TERMINATE_ALL:
 		return sirfsoc_dma_terminate_all(schan);
 	case DMA_SLAVE_CONFIG:
@@ -568,6 +625,9 @@ static int sirfsoc_dma_probe(struct platform_device *op)
 		return -ENOMEM;
 	}
 
+	if (of_device_is_compatible(dn, "sirf,marco-dmac"))
+		sdma->is_marco = true;
+
 	if (of_property_read_u32(dn, "cell-index", &id)) {
 		dev_err(dev, "Fail to get DMAC index\n");
 		return -ENODEV;
@@ -668,6 +728,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
 
 static struct of_device_id sirfsoc_dma_match[] = {
 	{ .compatible = "sirf,prima2-dmac", },
+	{ .compatible = "sirf,marco-dmac", },
 	{},
 };
 
|
|||||||
#define D40_ALLOC_PHY (1 << 30)
|
#define D40_ALLOC_PHY (1 << 30)
|
||||||
#define D40_ALLOC_LOG_FREE 0
|
#define D40_ALLOC_LOG_FREE 0
|
||||||
|
|
||||||
|
#define MAX(a, b) (((a) < (b)) ? (b) : (a))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* enum 40_command - The different commands and/or statuses.
|
* enum 40_command - The different commands and/or statuses.
|
||||||
*
|
*
|
||||||
@ -100,8 +102,19 @@ static u32 d40_backup_regs[] = {
|
|||||||
|
|
||||||
#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
|
#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
|
||||||
|
|
||||||
/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
|
/*
|
||||||
static u32 d40_backup_regs_v3[] = {
|
* since 9540 and 8540 has the same HW revision
|
||||||
|
* use v4a for 9540 or ealier
|
||||||
|
* use v4b for 8540 or later
|
||||||
|
* HW revision:
|
||||||
|
* DB8500ed has revision 0
|
||||||
|
* DB8500v1 has revision 2
|
||||||
|
* DB8500v2 has revision 3
|
||||||
|
* AP9540v1 has revision 4
|
||||||
|
* DB8540v1 has revision 4
|
||||||
|
* TODO: Check if all these registers have to be saved/restored on dma40 v4a
|
||||||
|
*/
|
||||||
|
static u32 d40_backup_regs_v4a[] = {
|
||||||
D40_DREG_PSEG1,
|
D40_DREG_PSEG1,
|
||||||
D40_DREG_PSEG2,
|
D40_DREG_PSEG2,
|
||||||
D40_DREG_PSEG3,
|
D40_DREG_PSEG3,
|
||||||
@ -120,7 +133,32 @@ static u32 d40_backup_regs_v3[] = {
|
|||||||
D40_DREG_RCEG4,
|
D40_DREG_RCEG4,
|
||||||
};
|
};
|
||||||
|
|
||||||
#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
|
#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)
|
||||||
|
|
||||||
|
static u32 d40_backup_regs_v4b[] = {
|
||||||
|
D40_DREG_CPSEG1,
|
||||||
|
D40_DREG_CPSEG2,
|
||||||
|
D40_DREG_CPSEG3,
|
||||||
|
D40_DREG_CPSEG4,
|
||||||
|
D40_DREG_CPSEG5,
|
||||||
|
D40_DREG_CPCEG1,
|
||||||
|
D40_DREG_CPCEG2,
|
||||||
|
D40_DREG_CPCEG3,
|
||||||
|
D40_DREG_CPCEG4,
|
||||||
|
D40_DREG_CPCEG5,
|
||||||
|
D40_DREG_CRSEG1,
|
||||||
|
D40_DREG_CRSEG2,
|
||||||
|
D40_DREG_CRSEG3,
|
||||||
|
D40_DREG_CRSEG4,
|
||||||
|
D40_DREG_CRSEG5,
|
||||||
|
D40_DREG_CRCEG1,
|
||||||
|
D40_DREG_CRCEG2,
|
||||||
|
D40_DREG_CRCEG3,
|
||||||
|
D40_DREG_CRCEG4,
|
||||||
|
D40_DREG_CRCEG5,
|
||||||
|
};
|
||||||
|
|
||||||
|
#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
|
||||||
|
|
||||||
static u32 d40_backup_regs_chan[] = {
|
static u32 d40_backup_regs_chan[] = {
|
||||||
D40_CHAN_REG_SSCFG,
|
D40_CHAN_REG_SSCFG,
|
||||||
@ -133,6 +171,102 @@ static u32 d40_backup_regs_chan[] = {
|
|||||||
D40_CHAN_REG_SDLNK,
|
D40_CHAN_REG_SDLNK,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct d40_interrupt_lookup - lookup table for interrupt handler
|
||||||
|
*
|
||||||
|
* @src: Interrupt mask register.
|
||||||
|
* @clr: Interrupt clear register.
|
||||||
|
* @is_error: true if this is an error interrupt.
|
||||||
|
* @offset: start delta in the lookup_log_chans in d40_base. If equals to
|
||||||
|
* D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
|
||||||
|
*/
|
||||||
|
struct d40_interrupt_lookup {
|
||||||
|
u32 src;
|
||||||
|
u32 clr;
|
||||||
|
bool is_error;
|
||||||
|
int offset;
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
static struct d40_interrupt_lookup il_v4a[] = {
|
||||||
|
{D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
|
||||||
|
{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
|
||||||
|
{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
|
||||||
|
{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
|
||||||
|
{D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
|
||||||
|
{D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
|
||||||
|
{D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
|
||||||
|
{D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
|
||||||
|
{D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
|
||||||
|
{D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
|
||||||
|
};
|
||||||
|
|
||||||
|
static struct d40_interrupt_lookup il_v4b[] = {
|
||||||
|
{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0},
|
||||||
|
{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
|
||||||
|
{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
|
||||||
|
{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
|
||||||
|
{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
|
||||||
|
{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0},
|
||||||
|
{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32},
|
||||||
|
{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64},
|
||||||
|
{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96},
|
||||||
|
{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128},
|
||||||
|
{D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN},
|
||||||
|
{D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN},
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct d40_reg_val - simple lookup struct
|
||||||
|
*
|
||||||
|
* @reg: The register.
|
||||||
|
* @val: The value that belongs to the register in reg.
|
||||||
|
*/
|
||||||
|
struct d40_reg_val {
|
||||||
|
unsigned int reg;
|
||||||
|
unsigned int val;
|
||||||
|
};
|
||||||
|
|
||||||
|
static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
|
||||||
|
/* Clock every part of the DMA block from start */
|
||||||
|
{ .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
|
||||||
|
|
||||||
|
/* Interrupts on all logical channels */
|
||||||
|
{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
|
||||||
|
};
|
||||||
|
static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
|
||||||
|
/* Clock every part of the DMA block from start */
|
||||||
|
{ .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
|
||||||
|
|
||||||
|
/* Interrupts on all logical channels */
|
||||||
|
{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
|
||||||
|
{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
|
||||||
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* struct d40_lli_pool - Structure for keeping LLIs in memory
|
* struct d40_lli_pool - Structure for keeping LLIs in memory
|
||||||
*
|
*
|
||||||
@@ -221,6 +355,7 @@ struct d40_lcla_pool {
  * @allocated_dst: Same as for src but is dst.
  * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
  * event line number.
+ * @use_soft_lli: To mark if the linked lists of channel are managed by SW.
  */
 struct d40_phy_res {
 	spinlock_t lock;
@@ -228,6 +363,7 @@ struct d40_phy_res {
 	int	   num;
 	u32	   allocated_src;
 	u32	   allocated_dst;
+	bool	   use_soft_lli;
 };
 
 struct d40_base;
@@ -248,6 +384,7 @@ struct d40_base;
  * @client: Cliented owned descriptor list.
  * @pending_queue: Submitted jobs, to be issued by issue_pending()
  * @active: Active descriptor.
+ * @done: Completed jobs
  * @queue: Queued jobs.
  * @prepare_queue: Prepared jobs.
  * @dma_cfg: The client configuration of this dma channel.
@@ -273,6 +410,7 @@ struct d40_chan {
 	struct list_head		 client;
 	struct list_head		 pending_queue;
 	struct list_head		 active;
+	struct list_head		 done;
 	struct list_head		 queue;
 	struct list_head		 prepare_queue;
 	struct stedma40_chan_cfg	 dma_cfg;
@@ -288,6 +426,38 @@ struct d40_chan {
 	enum dma_transfer_direction	runtime_direction;
 };
 
+/**
+ * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
+ * controller
+ *
+ * @backup: the pointer to the registers address array for backup
+ * @backup_size: the size of the registers address array for backup
+ * @realtime_en: the realtime enable register
+ * @realtime_clear: the realtime clear register
+ * @high_prio_en: the high priority enable register
+ * @high_prio_clear: the high priority clear register
+ * @interrupt_en: the interrupt enable register
+ * @interrupt_clear: the interrupt clear register
+ * @il: the pointer to struct d40_interrupt_lookup
+ * @il_size: the size of d40_interrupt_lookup array
+ * @init_reg: the pointer to the struct d40_reg_val
+ * @init_reg_size: the size of d40_reg_val array
+ */
+struct d40_gen_dmac {
+	u32				*backup;
+	u32				 backup_size;
+	u32				 realtime_en;
+	u32				 realtime_clear;
+	u32				 high_prio_en;
+	u32				 high_prio_clear;
+	u32				 interrupt_en;
+	u32				 interrupt_clear;
+	struct d40_interrupt_lookup	*il;
+	u32				 il_size;
+	struct d40_reg_val		*init_reg;
+	u32				 init_reg_size;
+};
+
 /**
  * struct d40_base - The big global struct, one for each probe'd instance.
  *
@@ -326,11 +496,13 @@ struct d40_chan {
  * @desc_slab: cache for descriptors.
  * @reg_val_backup: Here the values of some hardware registers are stored
  * before the DMA is powered off. They are restored when the power is back on.
- * @reg_val_backup_v3: Backup of registers that only exits on dma40 v3 and
- * later.
+ * @reg_val_backup_v4: Backup of registers that only exits on dma40 v3 and
+ * later
  * @reg_val_backup_chan: Backup data for standard channel parameter registers.
  * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
  * @initialized: true if the dma has been initialized
+ * @gen_dmac: the struct for generic registers values to represent u8500/8540
+ * DMA controller
  */
 struct d40_base {
 	spinlock_t			 interrupt_lock;
@@ -344,6 +516,7 @@ struct d40_base {
 	int				 irq;
 	int				 num_phy_chans;
 	int				 num_log_chans;
+	struct device_dma_parameters	 dma_parms;
 	struct dma_device		 dma_both;
 	struct dma_device		 dma_slave;
 	struct dma_device		 dma_memcpy;
@@ -361,37 +534,11 @@ struct d40_base {
 	resource_size_t			 lcpa_size;
 	struct kmem_cache		*desc_slab;
 	u32				 reg_val_backup[BACKUP_REGS_SZ];
-	u32				 reg_val_backup_v3[BACKUP_REGS_SZ_V3];
+	u32				 reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)];
 	u32				*reg_val_backup_chan;
 	u16				 gcc_pwr_off_mask;
 	bool				 initialized;
+	struct d40_gen_dmac		 gen_dmac;
 };
-
-/**
- * struct d40_interrupt_lookup - lookup table for interrupt handler
- *
- * @src: Interrupt mask register.
- * @clr: Interrupt clear register.
- * @is_error: true if this is an error interrupt.
- * @offset: start delta in the lookup_log_chans in d40_base. If equals to
- * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
- */
-struct d40_interrupt_lookup {
-	u32 src;
-	u32 clr;
-	bool is_error;
-	int offset;
-};
-
-/**
- * struct d40_reg_val - simple lookup struct
- *
- * @reg: The register.
- * @val: The value that belongs to the register in reg.
- */
-struct d40_reg_val {
-	unsigned int reg;
-	unsigned int val;
-};
 
 static struct device *chan2dev(struct d40_chan *d40c)
@@ -494,19 +641,18 @@ static int d40_lcla_alloc_one(struct d40_chan *d40c,
 	unsigned long flags;
 	int i;
 	int ret = -EINVAL;
-	int p;
 
 	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
 
-	p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
-
 	/*
 	 * Allocate both src and dst at the same time, therefore the half
 	 * start on 1 since 0 can't be used since zero is used as end marker.
 	 */
 	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
-		if (!d40c->base->lcla_pool.alloc_map[p + i]) {
-			d40c->base->lcla_pool.alloc_map[p + i] = d40d;
+		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
+
+		if (!d40c->base->lcla_pool.alloc_map[idx]) {
+			d40c->base->lcla_pool.alloc_map[idx] = d40d;
 			d40d->lcla_alloc++;
 			ret = i;
 			break;
@@ -531,10 +677,10 @@ static int d40_lcla_free_all(struct d40_chan *d40c,
 	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
 
 	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
-		if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
-			D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
-			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
-				D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
+		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
+
+		if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
+			d40c->base->lcla_pool.alloc_map[idx] = NULL;
 			d40d->lcla_alloc--;
 			if (d40d->lcla_alloc == 0) {
 				ret = 0;
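
Both loops above now derive one flat index per (physical channel, link slot) pair instead of carrying a precomputed base around. A minimal sketch of the slot arithmetic (the helper name is made up; the constant is the driver's):

    /*
     * Sketch: the LCLA slots of a physical channel occupy the range
     * [num * D40_LCLA_LINK_PER_EVENT_GRP, (num + 1) * D40_LCLA_LINK_PER_EVENT_GRP).
     * Slot 0 is never handed out because 0 doubles as the end-of-list
     * marker, which is why the allocation loop starts at i = 1.
     */
    static inline int lcla_idx(int phy_chan_num, int slot)
    {
            return phy_chan_num * D40_LCLA_LINK_PER_EVENT_GRP + slot;
    }
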
@@ -611,6 +757,11 @@ static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
 	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
 }
 
+static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
+{
+	list_add_tail(&desc->node, &d40c->done);
+}
+
 static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
 {
 	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
@@ -634,7 +785,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
 	 * can't link back to the one in LCPA space
 	 */
 	if (linkback || (lli_len - lli_current > 1)) {
-		curr_lcla = d40_lcla_alloc_one(chan, desc);
+		/*
+		 * If the channel is expected to use only soft_lli don't
+		 * allocate a lcla. This is to avoid a HW issue that exists
+		 * in some controller during a peripheral to memory transfer
+		 * that uses linked lists.
+		 */
+		if (!(chan->phy_chan->use_soft_lli &&
+			chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM))
+			curr_lcla = d40_lcla_alloc_one(chan, desc);
+
 		first_lcla = curr_lcla;
 	}
 
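
The `use_soft_lli` flag that gates this workaround is set per physical channel from platform data (see the marking loop in d40_phy_res_init() further down). A hedged sketch of how a board file might supply that list, assuming the `stedma40_platform_data` fields used there; the channel numbers are invented:

    /* Sketch: flag channels that must stay on software-managed LLIs. */
    static int dma40_soft_lli_chans[] = { 2, 3 };

    static struct stedma40_platform_data dma40_plat_data = {
            /* ... other platform data ... */
            .soft_lli_chans         = dma40_soft_lli_chans,
            .num_of_soft_lli_chans  = ARRAY_SIZE(dma40_soft_lli_chans),
    };
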
@@ -771,6 +931,14 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
 	return d;
 }
 
+static struct d40_desc *d40_first_done(struct d40_chan *d40c)
+{
+	if (list_empty(&d40c->done))
+		return NULL;
+
+	return list_first_entry(&d40c->done, struct d40_desc, node);
+}
+
 static int d40_psize_2_burst_size(bool is_log, int psize)
 {
 	if (is_log) {
@@ -874,11 +1042,11 @@ static void d40_save_restore_registers(struct d40_base *base, bool save)
 			  save);
 
 	/* Save/Restore registers only existing on dma40 v3 and later */
-	if (base->rev >= 3)
-		dma40_backup(base->virtbase, base->reg_val_backup_v3,
-			d40_backup_regs_v3,
-			ARRAY_SIZE(d40_backup_regs_v3),
-			save);
+	if (base->gen_dmac.backup)
+		dma40_backup(base->virtbase, base->reg_val_backup_v4,
+			base->gen_dmac.backup,
+			base->gen_dmac.backup_size,
+			save);
 }
 #else
 static void d40_save_restore_registers(struct d40_base *base, bool save)
@@ -961,6 +1129,12 @@ static void d40_term_all(struct d40_chan *d40c)
 	struct d40_desc *d40d;
 	struct d40_desc *_d;
 
+	/* Release completed descriptors */
+	while ((d40d = d40_first_done(d40c))) {
+		d40_desc_remove(d40d);
+		d40_desc_free(d40c, d40d);
+	}
+
 	/* Release active descriptors */
 	while ((d40d = d40_first_active_get(d40c))) {
 		d40_desc_remove(d40d);
@@ -1396,6 +1570,9 @@ static void dma_tc_handle(struct d40_chan *d40c)
 			d40c->busy = false;
 			pm_runtime_mark_last_busy(d40c->base->dev);
 			pm_runtime_put_autosuspend(d40c->base->dev);
+
+			d40_desc_remove(d40d);
+			d40_desc_done(d40c, d40d);
 		}
 
 	d40c->pending_tx++;
@@ -1413,10 +1590,14 @@ static void dma_tasklet(unsigned long data)
 
 	spin_lock_irqsave(&d40c->lock, flags);
 
-	/* Get first active entry from list */
-	d40d = d40_first_active_get(d40c);
-	if (d40d == NULL)
-		goto err;
+	/* Get first entry from the done list */
+	d40d = d40_first_done(d40c);
+	if (d40d == NULL) {
+		/* Check if we have reached here for cyclic job */
+		d40d = d40_first_active_get(d40c);
+		if (d40d == NULL || !d40d->cyclic)
+			goto err;
+	}
 
 	if (!d40d->cyclic)
 		dma_cookie_complete(&d40d->txd);
@@ -1438,13 +1619,11 @@ static void dma_tasklet(unsigned long data)
 		if (async_tx_test_ack(&d40d->txd)) {
 			d40_desc_remove(d40d);
 			d40_desc_free(d40c, d40d);
-		} else {
-			if (!d40d->is_in_client_list) {
-				d40_desc_remove(d40d);
-				d40_lcla_free_all(d40c, d40d);
-				list_add_tail(&d40d->node, &d40c->client);
-				d40d->is_in_client_list = true;
-			}
+		} else if (!d40d->is_in_client_list) {
+			d40_desc_remove(d40d);
+			d40_lcla_free_all(d40c, d40d);
+			list_add_tail(&d40d->node, &d40c->client);
+			d40d->is_in_client_list = true;
 		}
 	}
 
@@ -1469,53 +1648,51 @@ err:
 
 static irqreturn_t d40_handle_interrupt(int irq, void *data)
 {
-	static const struct d40_interrupt_lookup il[] = {
-		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
-		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
-		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
-		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
-		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
-		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
-		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
-		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
-		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
-		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
-	};
-
 	int i;
-	u32 regs[ARRAY_SIZE(il)];
 	u32 idx;
 	u32 row;
 	long chan = -1;
 	struct d40_chan *d40c;
 	unsigned long flags;
 	struct d40_base *base = data;
+	u32 regs[base->gen_dmac.il_size];
+	struct d40_interrupt_lookup *il = base->gen_dmac.il;
+	u32 il_size = base->gen_dmac.il_size;
 
 	spin_lock_irqsave(&base->interrupt_lock, flags);
 
 	/* Read interrupt status of both logical and physical channels */
-	for (i = 0; i < ARRAY_SIZE(il); i++)
+	for (i = 0; i < il_size; i++)
 		regs[i] = readl(base->virtbase + il[i].src);
 
 	for (;;) {
 
 		chan = find_next_bit((unsigned long *)regs,
-				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
+				     BITS_PER_LONG * il_size, chan + 1);
 
 		/* No more set bits found? */
-		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
+		if (chan == BITS_PER_LONG * il_size)
 			break;
 
 		row = chan / BITS_PER_LONG;
 		idx = chan & (BITS_PER_LONG - 1);
 
-		/* ACK interrupt */
-		writel(1 << idx, base->virtbase + il[row].clr);
-
 		if (il[row].offset == D40_PHY_CHAN)
 			d40c = base->lookup_phy_chans[idx];
 		else
 			d40c = base->lookup_log_chans[il[row].offset + idx];
+
+		if (!d40c) {
+			/*
+			 * No error because this can happen if something else
+			 * in the system is using the channel.
+			 */
+			continue;
+		}
+
+		/* ACK interrupt */
+		writel(1 << idx, base->virtbase + il[row].clr);
+
 		spin_lock(&d40c->lock);
 
 		if (!il[row].is_error)
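
Taken together, the done-list changes split descriptor retirement in two: the terminal-count path moves a finished, non-cyclic job from the active list to the done list, and the tasklet completes jobs from the done list, falling back to the active list only to service cyclic jobs. A condensed sketch of that flow (names are from the driver above; the surrounding control flow is elided):

    /* Condensed sketch of the post-patch descriptor flow. */
    static void tc_handle_sketch(struct d40_chan *d40c, struct d40_desc *d40d)
    {
            d40_desc_remove(d40d);          /* off the active list...  */
            d40_desc_done(d40c, d40d);      /* ...onto the done list   */
    }

    static struct d40_desc *tasklet_pick_sketch(struct d40_chan *d40c)
    {
            struct d40_desc *d40d = d40_first_done(d40c);

            if (!d40d) {                    /* nothing retired yet     */
                    d40d = d40_first_active_get(d40c);
                    if (!d40d || !d40d->cyclic)
                            return NULL;    /* spurious wakeup         */
            }
            return d40d;
    }
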
@@ -1710,10 +1887,12 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
 	int i;
 	int j;
 	int log_num;
+	int num_phy_chans;
 	bool is_src;
 	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
 
 	phys = d40c->base->phy_res;
+	num_phy_chans = d40c->base->num_phy_chans;
 
 	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
 		dev_type = d40c->dma_cfg.src_dev_type;
@@ -1734,12 +1913,19 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
 	if (!is_log) {
 		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
 			/* Find physical half channel */
-			for (i = 0; i < d40c->base->num_phy_chans; i++) {
+			if (d40c->dma_cfg.use_fixed_channel) {
+				i = d40c->dma_cfg.phy_channel;
 				if (d40_alloc_mask_set(&phys[i], is_src,
 						       0, is_log,
 						       first_phy_user))
 					goto found_phy;
+			} else {
+				for (i = 0; i < num_phy_chans; i++) {
+					if (d40_alloc_mask_set(&phys[i], is_src,
+						       0, is_log,
+						       first_phy_user))
+						goto found_phy;
+				}
 			}
 		} else
 			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
@@ -1954,7 +2140,6 @@ _exit:
 
 }
 
-
 static u32 stedma40_residue(struct dma_chan *chan)
 {
 	struct d40_chan *d40c =
@@ -2030,7 +2215,6 @@ d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
 	return ret < 0 ? ret : 0;
 }
 
-
 static struct d40_desc *
 d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
 	      unsigned int sg_len, unsigned long dma_flags)
@@ -2056,7 +2240,6 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
 		goto err;
 	}
 
-
 	desc->lli_current = 0;
 	desc->txd.flags = dma_flags;
 	desc->txd.tx_submit = d40_tx_submit;
@@ -2105,7 +2288,6 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 		return NULL;
 	}
 
-
 	spin_lock_irqsave(&chan->lock, flags);
 
 	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
@@ -2179,11 +2361,26 @@ static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
 {
 	bool realtime = d40c->dma_cfg.realtime;
 	bool highprio = d40c->dma_cfg.high_priority;
-	u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
-	u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
-
+	u32 rtreg;
 	u32 event = D40_TYPE_TO_EVENT(dev_type);
 	u32 group = D40_TYPE_TO_GROUP(dev_type);
 	u32 bit = 1 << event;
+	u32 prioreg;
+	struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
+
+	rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
+
+	/*
+	 * Due to a hardware bug, in some cases a logical channel triggered by
+	 * a high priority destination event line can generate extra packet
+	 * transactions.
+	 *
+	 * The workaround is to not set the high priority level for the
+	 * destination event lines that trigger logical channels.
+	 */
+	if (!src && chan_is_logical(d40c))
+		highprio = false;
+
+	prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
 
 	/* Destination event lines are stored in the upper halfword */
 	if (!src)
@@ -2248,11 +2445,11 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 
 		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
 			d40c->lcpa = d40c->base->lcpa_base +
-			  d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
+				d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
 		else
 			d40c->lcpa = d40c->base->lcpa_base +
-			  d40c->dma_cfg.dst_dev_type *
-			  D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
+				d40c->dma_cfg.dst_dev_type *
+				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
 	}
 
 	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
@@ -2287,7 +2484,6 @@ static void d40_free_chan_resources(struct dma_chan *chan)
 		return;
 	}
 
-
 	spin_lock_irqsave(&d40c->lock, flags);
 
 	err = d40_free_dma(d40c);
@@ -2330,14 +2526,12 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
 	return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
 }
 
-static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
-							  struct scatterlist *sgl,
-							  unsigned int sg_len,
-							  enum dma_transfer_direction direction,
-							  unsigned long dma_flags,
-							  void *context)
+static struct dma_async_tx_descriptor *
+d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		  unsigned int sg_len, enum dma_transfer_direction direction,
+		  unsigned long dma_flags, void *context)
 {
-	if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
+	if (!is_slave_direction(direction))
 		return NULL;
 
 	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
@@ -2577,6 +2771,14 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 		return -EINVAL;
 	}
 
+	if (src_maxburst > 16) {
+		src_maxburst = 16;
+		dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
+	} else if (dst_maxburst > 16) {
+		dst_maxburst = 16;
+		src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
+	}
+
 	ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
 					  src_addr_width,
 					  src_maxburst);
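
The new clamp keeps both half-channels moving the same number of bytes per burst: when one side is capped at 16 beats, the other is rescaled by the ratio of the element widths. A worked example with hypothetical values:

    /*
     * Worked example (hypothetical request):
     *   src_maxburst = 32 beats, src_addr_width = 4 bytes -> 128 B/burst
     *   dst_addr_width = 2 bytes
     * src_maxburst > 16, so:
     */
    src_maxburst = 16;          /* 16 beats * 4 B = 64 B per burst */
    dst_maxburst = 16 * 4 / 2;  /* 32 beats * 2 B = 64 B per burst */
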
@@ -2659,6 +2861,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
 
 		d40c->log_num = D40_PHY_CHAN;
 
+		INIT_LIST_HEAD(&d40c->done);
 		INIT_LIST_HEAD(&d40c->active);
 		INIT_LIST_HEAD(&d40c->queue);
 		INIT_LIST_HEAD(&d40c->pending_queue);
@@ -2773,8 +2976,6 @@ static int dma40_pm_suspend(struct device *dev)
 	struct platform_device *pdev = to_platform_device(dev);
 	struct d40_base *base = platform_get_drvdata(pdev);
 	int ret = 0;
-	if (!pm_runtime_suspended(dev))
-		return -EBUSY;
 
 	if (base->lcpa_regulator)
 		ret = regulator_disable(base->lcpa_regulator);
@@ -2882,6 +3083,13 @@ static int __init d40_phy_res_init(struct d40_base *base)
 			num_phy_chans_avail--;
 	}
 
+	/* Mark soft_lli channels */
+	for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
+		int chan = base->plat_data->soft_lli_chans[i];
+
+		base->phy_res[chan].use_soft_lli = true;
+	}
+
 	dev_info(base->dev, "%d of %d physical DMA channels available\n",
 		 num_phy_chans_avail, base->num_phy_chans);
 
@@ -2975,14 +3183,21 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	 *    ? has revision 1
 	 * DB8500v1 has revision 2
 	 * DB8500v2 has revision 3
+	 * AP9540v1 has revision 4
+	 * DB8540v1 has revision 4
 	 */
 	rev = AMBA_REV_BITS(pid);
 
+	plat_data = pdev->dev.platform_data;
+
 	/* The number of physical channels on this HW */
-	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
+	if (plat_data->num_of_phy_chans)
+		num_phy_chans = plat_data->num_of_phy_chans;
+	else
+		num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
 
-	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
-		 rev, res->start);
+	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n",
+		 rev, res->start, num_phy_chans);
 
 	if (rev < 2) {
 		d40_err(&pdev->dev, "hardware revision: %d is not supported",
@@ -2990,8 +3205,6 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 		goto failure;
 	}
 
-	plat_data = pdev->dev.platform_data;
-
 	/* Count the number of logical channels in use */
 	for (i = 0; i < plat_data->dev_len; i++)
 		if (plat_data->dev_rx[i] != 0)
@@ -3022,6 +3235,36 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
 	base->log_chans = &base->phy_chans[num_phy_chans];
 
+	if (base->plat_data->num_of_phy_chans == 14) {
+		base->gen_dmac.backup = d40_backup_regs_v4b;
+		base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
+		base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
+		base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
+		base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
+		base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
+		base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
+		base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
+		base->gen_dmac.il = il_v4b;
+		base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
+		base->gen_dmac.init_reg = dma_init_reg_v4b;
+		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
+	} else {
+		if (base->rev >= 3) {
+			base->gen_dmac.backup = d40_backup_regs_v4a;
+			base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
+		}
+		base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
+		base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
+		base->gen_dmac.realtime_en = D40_DREG_RSEG1;
+		base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
+		base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
+		base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
+		base->gen_dmac.il = il_v4a;
+		base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
+		base->gen_dmac.init_reg = dma_init_reg_v4a;
+		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
+	}
+
 	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
 				GFP_KERNEL);
 	if (!base->phy_res)
@@ -3093,31 +3336,15 @@ failure:
 static void __init d40_hw_init(struct d40_base *base)
 {
 
-	static struct d40_reg_val dma_init_reg[] = {
-		/* Clock every part of the DMA block from start */
-		{ .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
-
-		/* Interrupts on all logical channels */
-		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
-		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
-		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
-		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
-		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
-		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
-		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
-		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
-		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
-		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
-		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
-		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
-	};
 	int i;
 	u32 prmseo[2] = {0, 0};
 	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
 	u32 pcmis = 0;
 	u32 pcicr = 0;
+	struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
+	u32 reg_size = base->gen_dmac.init_reg_size;
 
-	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
+	for (i = 0; i < reg_size; i++)
 		writel(dma_init_reg[i].val,
 		       base->virtbase + dma_init_reg[i].reg);
 
@@ -3150,11 +3377,14 @@ static void __init d40_hw_init(struct d40_base *base)
 	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
 
 	/* Write which interrupt to enable */
-	writel(pcmis, base->virtbase + D40_DREG_PCMIS);
+	writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
 
 	/* Write which interrupt to clear */
-	writel(pcicr, base->virtbase + D40_DREG_PCICR);
+	writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
+
+	/* These are __initdata and cannot be accessed after init */
+	base->gen_dmac.init_reg = NULL;
+	base->gen_dmac.init_reg_size = 0;
 }
 
 static int __init d40_lcla_allocate(struct d40_base *base)
@@ -3362,6 +3592,13 @@ static int __init d40_probe(struct platform_device *pdev)
 	if (err)
 		goto failure;
 
+	base->dev->dma_parms = &base->dma_parms;
+	err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
+	if (err) {
+		d40_err(&pdev->dev, "Failed to set dma max seg size\n");
+		goto failure;
+	}
+
 	d40_hw_init(base);
 
 	dev_info(base->dev, "initialized\n");
|
|||||||
release_mem_region(base->phy_start,
|
release_mem_region(base->phy_start,
|
||||||
base->phy_size);
|
base->phy_size);
|
||||||
if (base->clk) {
|
if (base->clk) {
|
||||||
clk_disable(base->clk);
|
clk_disable_unprepare(base->clk);
|
||||||
clk_put(base->clk);
|
clk_put(base->clk);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -102,17 +102,18 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
|
|||||||
src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
|
src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
|
||||||
dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
|
dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
|
||||||
|
|
||||||
|
/* Set the priority bit to high for the physical channel */
|
||||||
|
if (cfg->high_priority) {
|
||||||
|
src |= 1 << D40_SREG_CFG_PRI_POS;
|
||||||
|
dst |= 1 << D40_SREG_CFG_PRI_POS;
|
||||||
|
}
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
/* Logical channel */
|
/* Logical channel */
|
||||||
dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
|
dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
|
||||||
src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
|
src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cfg->high_priority) {
|
|
||||||
src |= 1 << D40_SREG_CFG_PRI_POS;
|
|
||||||
dst |= 1 << D40_SREG_CFG_PRI_POS;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (cfg->src_info.big_endian)
|
if (cfg->src_info.big_endian)
|
||||||
src |= 1 << D40_SREG_CFG_LBE_POS;
|
src |= 1 << D40_SREG_CFG_LBE_POS;
|
||||||
if (cfg->dst_info.big_endian)
|
if (cfg->dst_info.big_endian)
|
||||||
@ -250,7 +251,7 @@ d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
|
|||||||
|
|
||||||
return lli;
|
return lli;
|
||||||
|
|
||||||
err:
|
err:
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -331,10 +332,10 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
|
|||||||
{
|
{
|
||||||
d40_log_lli_link(lli_dst, lli_src, next, flags);
|
d40_log_lli_link(lli_dst, lli_src, next, flags);
|
||||||
|
|
||||||
writel(lli_src->lcsp02, &lcpa[0].lcsp0);
|
writel_relaxed(lli_src->lcsp02, &lcpa[0].lcsp0);
|
||||||
writel(lli_src->lcsp13, &lcpa[0].lcsp1);
|
writel_relaxed(lli_src->lcsp13, &lcpa[0].lcsp1);
|
||||||
writel(lli_dst->lcsp02, &lcpa[0].lcsp2);
|
writel_relaxed(lli_dst->lcsp02, &lcpa[0].lcsp2);
|
||||||
writel(lli_dst->lcsp13, &lcpa[0].lcsp3);
|
writel_relaxed(lli_dst->lcsp13, &lcpa[0].lcsp3);
|
||||||
}
|
}
|
||||||
|
|
||||||
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
|
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
|
||||||
@ -344,10 +345,10 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
|
|||||||
{
|
{
|
||||||
d40_log_lli_link(lli_dst, lli_src, next, flags);
|
d40_log_lli_link(lli_dst, lli_src, next, flags);
|
||||||
|
|
||||||
writel(lli_src->lcsp02, &lcla[0].lcsp02);
|
writel_relaxed(lli_src->lcsp02, &lcla[0].lcsp02);
|
||||||
writel(lli_src->lcsp13, &lcla[0].lcsp13);
|
writel_relaxed(lli_src->lcsp13, &lcla[0].lcsp13);
|
||||||
writel(lli_dst->lcsp02, &lcla[1].lcsp02);
|
writel_relaxed(lli_dst->lcsp02, &lcla[1].lcsp02);
|
||||||
writel(lli_dst->lcsp13, &lcla[1].lcsp13);
|
writel_relaxed(lli_dst->lcsp13, &lcla[1].lcsp13);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void d40_log_fill_lli(struct d40_log_lli *lli,
|
static void d40_log_fill_lli(struct d40_log_lli *lli,
|
||||||
|
@ -125,7 +125,7 @@
|
|||||||
#define D40_DREG_GCC 0x000
|
#define D40_DREG_GCC 0x000
|
||||||
#define D40_DREG_GCC_ENA 0x1
|
#define D40_DREG_GCC_ENA 0x1
|
||||||
/* This assumes that there are only 4 event groups */
|
/* This assumes that there are only 4 event groups */
|
||||||
#define D40_DREG_GCC_ENABLE_ALL 0xff01
|
#define D40_DREG_GCC_ENABLE_ALL 0x3ff01
|
||||||
#define D40_DREG_GCC_EVTGRP_POS 8
|
#define D40_DREG_GCC_EVTGRP_POS 8
|
||||||
#define D40_DREG_GCC_SRC 0
|
#define D40_DREG_GCC_SRC 0
|
||||||
#define D40_DREG_GCC_DST 1
|
#define D40_DREG_GCC_DST 1
|
||||||
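
The wider constant follows from the layout the neighbouring defines describe: bit 0 is the global enable (D40_DREG_GCC_ENA) and each event group contributes a source and a destination clock-enable bit starting at D40_DREG_GCC_EVTGRP_POS. A sketch of the arithmetic (the two-bits-per-group encoding is inferred from those defines):

    /*
     * 4 event groups * 2 bits: (0xff  << 8) | 0x1 = 0x0ff01  (old value)
     * 5 event groups * 2 bits: (0x3ff << 8) | 0x1 = 0x3ff01  (new value)
     */
    #define GCC_ENABLE_ALL_SKETCH(ngroups) \
            ((((1u << (2 * (ngroups))) - 1) << D40_DREG_GCC_EVTGRP_POS) | \
             D40_DREG_GCC_ENA)
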
@@ -148,14 +148,31 @@
 
 #define D40_DREG_LCPA		0x020
 #define D40_DREG_LCLA		0x024
+
+#define D40_DREG_SSEG1		0x030
+#define D40_DREG_SSEG2		0x034
+#define D40_DREG_SSEG3		0x038
+#define D40_DREG_SSEG4		0x03C
+
+#define D40_DREG_SCEG1		0x040
+#define D40_DREG_SCEG2		0x044
+#define D40_DREG_SCEG3		0x048
+#define D40_DREG_SCEG4		0x04C
+
 #define D40_DREG_ACTIVE		0x050
 #define D40_DREG_ACTIVO		0x054
-#define D40_DREG_FSEB1		0x058
-#define D40_DREG_FSEB2		0x05C
+#define D40_DREG_CIDMOD		0x058
+#define D40_DREG_TCIDV		0x05C
 #define D40_DREG_PCMIS		0x060
 #define D40_DREG_PCICR		0x064
 #define D40_DREG_PCTIS		0x068
 #define D40_DREG_PCEIS		0x06C
+
+#define D40_DREG_SPCMIS		0x070
+#define D40_DREG_SPCICR		0x074
+#define D40_DREG_SPCTIS		0x078
+#define D40_DREG_SPCEIS		0x07C
+
 #define D40_DREG_LCMIS0		0x080
 #define D40_DREG_LCMIS1		0x084
 #define D40_DREG_LCMIS2		0x088
@@ -172,6 +189,33 @@
 #define D40_DREG_LCEIS1		0x0B4
 #define D40_DREG_LCEIS2		0x0B8
 #define D40_DREG_LCEIS3		0x0BC
+
+#define D40_DREG_SLCMIS1	0x0C0
+#define D40_DREG_SLCMIS2	0x0C4
+#define D40_DREG_SLCMIS3	0x0C8
+#define D40_DREG_SLCMIS4	0x0CC
+
+#define D40_DREG_SLCICR1	0x0D0
+#define D40_DREG_SLCICR2	0x0D4
+#define D40_DREG_SLCICR3	0x0D8
+#define D40_DREG_SLCICR4	0x0DC
+
+#define D40_DREG_SLCTIS1	0x0E0
+#define D40_DREG_SLCTIS2	0x0E4
+#define D40_DREG_SLCTIS3	0x0E8
+#define D40_DREG_SLCTIS4	0x0EC
+
+#define D40_DREG_SLCEIS1	0x0F0
+#define D40_DREG_SLCEIS2	0x0F4
+#define D40_DREG_SLCEIS3	0x0F8
+#define D40_DREG_SLCEIS4	0x0FC
+
+#define D40_DREG_FSESS1		0x100
+#define D40_DREG_FSESS2		0x104
+
+#define D40_DREG_FSEBS1		0x108
+#define D40_DREG_FSEBS2		0x10C
+
 #define D40_DREG_PSEG1		0x110
 #define D40_DREG_PSEG2		0x114
 #define D40_DREG_PSEG3		0x118
@@ -188,6 +232,86 @@
 #define D40_DREG_RCEG2		0x144
 #define D40_DREG_RCEG3		0x148
 #define D40_DREG_RCEG4		0x14C
+
+#define D40_DREG_PREFOT		0x15C
+#define D40_DREG_EXTCFG		0x160
+
+#define D40_DREG_CPSEG1		0x200
+#define D40_DREG_CPSEG2		0x204
+#define D40_DREG_CPSEG3		0x208
+#define D40_DREG_CPSEG4		0x20C
+#define D40_DREG_CPSEG5		0x210
+
+#define D40_DREG_CPCEG1		0x220
+#define D40_DREG_CPCEG2		0x224
+#define D40_DREG_CPCEG3		0x228
+#define D40_DREG_CPCEG4		0x22C
+#define D40_DREG_CPCEG5		0x230
+
+#define D40_DREG_CRSEG1		0x240
+#define D40_DREG_CRSEG2		0x244
+#define D40_DREG_CRSEG3		0x248
+#define D40_DREG_CRSEG4		0x24C
+#define D40_DREG_CRSEG5		0x250
+
+#define D40_DREG_CRCEG1		0x260
+#define D40_DREG_CRCEG2		0x264
+#define D40_DREG_CRCEG3		0x268
+#define D40_DREG_CRCEG4		0x26C
+#define D40_DREG_CRCEG5		0x270
+
+#define D40_DREG_CFSESS1	0x280
+#define D40_DREG_CFSESS2	0x284
+#define D40_DREG_CFSESS3	0x288
+
+#define D40_DREG_CFSEBS1	0x290
+#define D40_DREG_CFSEBS2	0x294
+#define D40_DREG_CFSEBS3	0x298
+
+#define D40_DREG_CLCMIS1	0x300
+#define D40_DREG_CLCMIS2	0x304
+#define D40_DREG_CLCMIS3	0x308
+#define D40_DREG_CLCMIS4	0x30C
+#define D40_DREG_CLCMIS5	0x310
+
+#define D40_DREG_CLCICR1	0x320
+#define D40_DREG_CLCICR2	0x324
+#define D40_DREG_CLCICR3	0x328
+#define D40_DREG_CLCICR4	0x32C
+#define D40_DREG_CLCICR5	0x330
+
+#define D40_DREG_CLCTIS1	0x340
+#define D40_DREG_CLCTIS2	0x344
+#define D40_DREG_CLCTIS3	0x348
+#define D40_DREG_CLCTIS4	0x34C
+#define D40_DREG_CLCTIS5	0x350
+
+#define D40_DREG_CLCEIS1	0x360
+#define D40_DREG_CLCEIS2	0x364
+#define D40_DREG_CLCEIS3	0x368
+#define D40_DREG_CLCEIS4	0x36C
+#define D40_DREG_CLCEIS5	0x370
+
+#define D40_DREG_CPCMIS		0x380
+#define D40_DREG_CPCICR		0x384
+#define D40_DREG_CPCTIS		0x388
+#define D40_DREG_CPCEIS		0x38C
+
+#define D40_DREG_SCCIDA1	0xE80
+#define D40_DREG_SCCIDA2	0xE90
+#define D40_DREG_SCCIDA3	0xEA0
+#define D40_DREG_SCCIDA4	0xEB0
+#define D40_DREG_SCCIDA5	0xEC0
+
+#define D40_DREG_SCCIDB1	0xE84
+#define D40_DREG_SCCIDB2	0xE94
+#define D40_DREG_SCCIDB3	0xEA4
+#define D40_DREG_SCCIDB4	0xEB4
+#define D40_DREG_SCCIDB5	0xEC4
+
+#define D40_DREG_PRSCCIDA	0xF80
+#define D40_DREG_PRSCCIDB	0xF84
+
 #define D40_DREG_STFU		0xFC8
 #define D40_DREG_ICFG		0xFCC
 #define D40_DREG_PERIPHID0	0xFE0
@@ -63,6 +63,9 @@
 #define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
 #define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC
 
+#define TEGRA_APBDMA_CHAN_CSRE			0x00C
+#define TEGRA_APBDMA_CHAN_CSRE_PAUSE		(1 << 31)
+
 /* AHB memory address */
 #define TEGRA_APBDMA_CHAN_AHBPTR		0x010
 
@@ -113,10 +116,12 @@ struct tegra_dma;
  * tegra_dma_chip_data Tegra chip specific DMA data
  * @nr_channels: Number of channels available in the controller.
  * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
+ * @support_channel_pause: Support channel wise pause of dma.
  */
 struct tegra_dma_chip_data {
 	int nr_channels;
 	int max_dma_count;
+	bool support_channel_pause;
 };
 
 /* DMA channel registers */
@@ -355,6 +360,32 @@ static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
 	spin_unlock(&tdma->global_lock);
 }
 
+static void tegra_dma_pause(struct tegra_dma_channel *tdc,
+	bool wait_for_burst_complete)
+{
+	struct tegra_dma *tdma = tdc->tdma;
+
+	if (tdma->chip_data->support_channel_pause) {
+		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
+				TEGRA_APBDMA_CHAN_CSRE_PAUSE);
+		if (wait_for_burst_complete)
+			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+	} else {
+		tegra_dma_global_pause(tdc, wait_for_burst_complete);
+	}
+}
+
+static void tegra_dma_resume(struct tegra_dma_channel *tdc)
+{
+	struct tegra_dma *tdma = tdc->tdma;
+
+	if (tdma->chip_data->support_channel_pause) {
+		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
+	} else {
+		tegra_dma_global_resume(tdc);
+	}
+}
+
 static void tegra_dma_stop(struct tegra_dma_channel *tdc)
 {
 	u32 csr;
@@ -410,7 +441,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
 	 * If there is already IEC status then interrupt handler need to
 	 * load new configuration.
 	 */
-	tegra_dma_global_pause(tdc, false);
+	tegra_dma_pause(tdc, false);
 	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
 
 	/*
@@ -420,7 +451,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
 	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
 		dev_err(tdc2dev(tdc),
 			"Skipping new configuration as interrupt is pending\n");
-		tegra_dma_global_resume(tdc);
+		tegra_dma_resume(tdc);
 		return;
 	}
 
@@ -431,7 +462,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
 			nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
 	nsg_req->configured = true;
 
-	tegra_dma_global_resume(tdc);
+	tegra_dma_resume(tdc);
 }
 
 static void tdc_start_head_req(struct tegra_dma_channel *tdc)
@@ -692,7 +723,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
 		goto skip_dma_stop;
 
 	/* Pause DMA before checking the queue status */
-	tegra_dma_global_pause(tdc, true);
+	tegra_dma_pause(tdc, true);
 
 	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
 	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
@@ -710,7 +741,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
 		sgreq->dma_desc->bytes_transferred +=
 				get_current_xferred_count(tdc, sgreq, status);
 	}
-	tegra_dma_global_resume(tdc);
+	tegra_dma_resume(tdc);
 
 skip_dma_stop:
 	tegra_dma_abort_all(tdc);
@@ -738,7 +769,6 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
 
 	ret = dma_cookie_status(dc, cookie, txstate);
 	if (ret == DMA_SUCCESS) {
-		dma_set_residue(txstate, 0);
 		spin_unlock_irqrestore(&tdc->lock, flags);
 		return ret;
 	}
@@ -1180,6 +1210,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
 static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
 	.nr_channels		= 16,
 	.max_dma_count		= 1024UL * 64,
+	.support_channel_pause	= false,
 };
 
 #if defined(CONFIG_OF)
@@ -1187,10 +1218,22 @@ static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
 static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
 	.nr_channels		= 32,
 	.max_dma_count		= 1024UL * 64,
+	.support_channel_pause	= false,
 };
 
+/* Tegra114 specific DMA controller information */
+static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
+	.nr_channels		= 32,
+	.max_dma_count		= 1024UL * 64,
+	.support_channel_pause	= true,
+};
+
+
 static const struct of_device_id tegra_dma_of_match[] = {
 	{
+		.compatible = "nvidia,tegra114-apbdma",
+		.data = &tegra114_dma_chip_data,
+	}, {
 		.compatible = "nvidia,tegra30-apbdma",
 		.data = &tegra30_dma_chip_data,
 	}, {
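
At probe time the matching table entry's `.data` pointer selects the chip data, falling back to the Tegra20 variant when there is no DT match. A sketch of the usual retrieval pattern for this kernel generation (the probe excerpt is illustrative, not part of the patch):

    /* Sketch: picking chip data from the OF match table in probe(). */
    static int tegra_dma_probe_sketch(struct platform_device *pdev)
    {
            const struct tegra_dma_chip_data *cdata = &tegra20_dma_chip_data;
            const struct of_device_id *match;

            match = of_match_device(tegra_dma_of_match, &pdev->dev);
            if (match)
                    cdata = match->data;
            /* ... rest of probe uses cdata->support_channel_pause ... */
            return 0;
    }
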
|
@ -546,7 +546,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
|
|||||||
goto out_dma_unmap;
|
goto out_dma_unmap;
|
||||||
}
|
}
|
||||||
|
|
||||||
dma_async_memcpy_issue_pending(chan);
|
dma_async_issue_pending(chan);
|
||||||
|
|
||||||
/* Set the total byte count */
|
/* Set the total byte count */
|
||||||
fpga_set_byte_count(priv->regs, priv->bytes);
|
fpga_set_byte_count(priv->regs, priv->bytes);
|
||||||
|
@ -631,6 +631,8 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
|
|||||||
struct dma_async_tx_descriptor *tx;
|
struct dma_async_tx_descriptor *tx;
|
||||||
dma_cookie_t cookie;
|
dma_cookie_t cookie;
|
||||||
dma_addr_t dst, src;
|
dma_addr_t dst, src;
|
||||||
|
unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP |
|
||||||
|
DMA_COMPL_SKIP_SRC_UNMAP;
|
||||||
|
|
||||||
dst_sg = buf->vb.sglist;
|
dst_sg = buf->vb.sglist;
|
||||||
dst_nents = buf->vb.sglen;
|
dst_nents = buf->vb.sglen;
|
||||||
@ -666,7 +668,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
|
|||||||
src = SYS_FPGA_BLOCK;
|
src = SYS_FPGA_BLOCK;
|
||||||
tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
|
tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
|
||||||
REG_BLOCK_SIZE,
|
REG_BLOCK_SIZE,
|
||||||
0);
|
dma_flags);
|
||||||
if (!tx) {
|
if (!tx) {
|
||||||
dev_err(priv->dev, "unable to prep SYS-FPGA DMA\n");
|
dev_err(priv->dev, "unable to prep SYS-FPGA DMA\n");
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
@ -749,7 +751,7 @@ static irqreturn_t data_irq(int irq, void *dev_id)
|
|||||||
submitted = true;
|
submitted = true;
|
||||||
|
|
||||||
/* Start the DMA Engine */
|
/* Start the DMA Engine */
|
||||||
dma_async_memcpy_issue_pending(priv->chan);
|
dma_async_issue_pending(priv->chan);
|
||||||
|
|
||||||
out:
|
out:
|
||||||
/* If no DMA was submitted, re-enable interrupts */
|
/* If no DMA was submitted, re-enable interrupts */
|
||||||
|
@@ -573,23 +573,22 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
 	dma_dev = chan->device;
 	dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
 
+	flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
+
 	if (direction == DMA_TO_DEVICE) {
 		dma_src = dma_addr;
 		dma_dst = host->data_pa;
-		flags |= DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_SKIP_DEST_UNMAP;
 	} else {
 		dma_src = host->data_pa;
 		dma_dst = dma_addr;
-		flags |= DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SKIP_SRC_UNMAP;
 	}
 
 	tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
 			len, flags);
-
 	if (!tx) {
 		dev_err(host->dev, "device_prep_dma_memcpy error\n");
-		dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
-		return -EIO;
+		ret = -EIO;
+		goto unmap_dma;
 	}
 
 	tx->callback = dma_complete;
@@ -599,7 +598,7 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
 	ret = dma_submit_error(cookie);
 	if (ret) {
 		dev_err(host->dev, "dma_submit_error %d\n", cookie);
-		return ret;
+		goto unmap_dma;
 	}
 
 	dma_async_issue_pending(chan);
@@ -610,10 +609,17 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
 	if (ret <= 0) {
 		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 		dev_err(host->dev, "wait_for_completion_timeout\n");
-		return ret ? ret : -ETIMEDOUT;
+		if (!ret)
+			ret = -ETIMEDOUT;
+		goto unmap_dma;
 	}
 
-	return 0;
+	ret = 0;
+
+unmap_dma:
+	dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
+
+	return ret;
 }
 
 /*
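
The rework above funnels every failure after the mapping onto one `unmap_dma` label, so the DMA mapping is released exactly once on all paths. The shape, reduced to a generic sketch (the `do_transfer` helper is hypothetical):

    /* Generic sketch of the goto-unwind idiom adopted in dma_xfer(). */
    static int xfer_sketch(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t addr;
            int ret;

            addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, addr))
                    return -ENOMEM;         /* nothing to unwind yet */

            ret = do_transfer(addr, len);   /* hypothetical helper   */
            if (ret)
                    goto unmap_dma;

            ret = 0;

    unmap_dma:
            dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
            return ret;
    }
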
|
@ -1,4 +1,4 @@
|
|||||||
/* arch/arm/include/asm/hardware/pl080.h
|
/* include/linux/amba/pl080.h
|
||||||
*
|
*
|
||||||
* Copyright 2008 Openmoko, Inc.
|
* Copyright 2008 Openmoko, Inc.
|
||||||
* Copyright 2008 Simtec Electronics
|
* Copyright 2008 Simtec Electronics
|
@ -608,7 +608,10 @@ static inline int dmaengine_device_control(struct dma_chan *chan,
|
|||||||
enum dma_ctrl_cmd cmd,
|
enum dma_ctrl_cmd cmd,
|
||||||
unsigned long arg)
|
unsigned long arg)
|
||||||
{
|
{
|
||||||
return chan->device->device_control(chan, cmd, arg);
|
if (chan->device->device_control)
|
||||||
|
return chan->device->device_control(chan, cmd, arg);
|
||||||
|
|
||||||
|
return -ENOSYS;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int dmaengine_slave_config(struct dma_chan *chan,
|
static inline int dmaengine_slave_config(struct dma_chan *chan,
|
||||||
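
With the NULL check in place, helpers built on dmaengine_device_control() degrade gracefully on drivers that never implemented the hook, and callers can tell "not supported" apart from a real failure. A hedged caller sketch:

    /* Sketch: terminate if supported, tolerate -ENOSYS if not. */
    static void stop_channel_sketch(struct dma_chan *chan)
    {
            int ret = dmaengine_terminate_all(chan);

            if (ret && ret != -ENOSYS)
                    pr_warn("terminate failed: %d\n", ret);
    }
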
@@ -618,6 +621,11 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
 			(unsigned long)config);
 }

+static inline bool is_slave_direction(enum dma_transfer_direction direction)
+{
+	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
+}
+
 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
 	struct dma_chan *chan, dma_addr_t buf, size_t len,
 	enum dma_transfer_direction dir, unsigned long flags)
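The new helper gives drivers one place to reject non-slave directions. A minimal sketch of its intended use in a driver's prep callback (function name and body hypothetical):

static struct dma_async_tx_descriptor *example_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction dir,
		unsigned long flags, void *context)
{
	if (!is_slave_direction(dir))
		return NULL;	/* only DMA_MEM_TO_DEV / DMA_DEV_TO_MEM */

	/* ... build and return the descriptor here ... */
	return NULL;
}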
@@ -660,6 +668,13 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
 						period_len, dir, flags, NULL);
 }

+static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
+	struct dma_chan *chan, struct dma_interleaved_template *xt,
+	unsigned long flags)
+{
+	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
+}
+
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
 	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
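dmaengine_prep_interleaved_dma() wraps the driver hook directly, so it may only be called on channels whose controller implements device_prep_interleaved_dma. A hedged sketch of a strided, one-chunk-per-frame copy (all sizes and names invented; the template ends in a flexible sgl[] array, so it is allocated with room for one chunk):

#include <linux/slab.h>
#include <linux/dmaengine.h>

static int example_submit_strided_copy(struct dma_chan *chan,
				       dma_addr_t dst, dma_addr_t src)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_interleaved_template *xt;
	int ret = 0;

	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->dir = DMA_MEM_TO_MEM;
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->src_sgl = false;		/* source is contiguous */
	xt->dst_sgl = true;		/* destination honours the icg */
	xt->numf = 240;			/* 240 frames (lines) */
	xt->frame_size = 1;		/* one chunk per frame */
	xt->sgl[0].size = 320;		/* bytes copied per line */
	xt->sgl[0].icg = 64;		/* gap skipped between lines */

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -EIO;
		goto out;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
out:
	kfree(xt);
	return ret;
}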
@@ -849,20 +864,6 @@ static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
 	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
 }

-#define first_dma_cap(mask) __first_dma_cap(&(mask))
-static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
-{
-	return min_t(int, DMA_TX_TYPE_END,
-		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
-}
-
-#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
-static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
-{
-	return min_t(int, DMA_TX_TYPE_END,
-		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
-}
-
 #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
 static inline void
 __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
@@ -891,9 +892,7 @@ __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
 }

 #define for_each_dma_cap_mask(cap, mask) \
-	for ((cap) = first_dma_cap(mask); \
-		(cap) < DMA_TX_TYPE_END; \
-		(cap) = next_dma_cap((cap), (mask)))
+	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)

 /**
  * dma_async_issue_pending - flush pending transactions to HW
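The hand-rolled first/next iterators are gone; the macro now delegates to the generic bitmap walker. A small sketch of the macro in use (function name hypothetical):

static void example_dump_caps(struct dma_device *device)
{
	enum dma_transaction_type cap;

	for_each_dma_cap_mask(cap, device->cap_mask)
		pr_info("dma: transaction type %d supported\n", cap);
}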
@@ -907,8 +906,6 @@ static inline void dma_async_issue_pending(struct dma_chan *chan)
 	chan->device->device_issue_pending(chan);
 }

-#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)
-
 /**
  * dma_async_is_tx_complete - poll for transaction completion
  * @chan: DMA channel
@@ -934,16 +931,13 @@ static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
 	return status;
 }

-#define dma_async_memcpy_complete(chan, cookie, last, used)\
-	dma_async_is_tx_complete(chan, cookie, last, used)
-
 /**
  * dma_async_is_complete - test a cookie against chan state
  * @cookie: transaction identifier to test status of
  * @last_complete: last known completed transaction
  * @last_used: last cookie value handed out
  *
- * dma_async_is_complete() is used in dma_async_memcpy_complete()
+ * dma_async_is_complete() is used in dma_async_is_tx_complete()
  * the test logic is separated for lightweight testing of multiple cookies
  */
 static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
@@ -974,6 +968,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
+struct dma_chan *dma_request_slave_channel(struct device *dev, char *name);
 void dma_release_channel(struct dma_chan *chan);
 #else
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
@@ -988,6 +983,11 @@ static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
 {
 	return NULL;
 }
+static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
+							 char *name)
+{
+	return NULL;
+}
 static inline void dma_release_channel(struct dma_chan *chan)
 {
 }
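dma_request_slave_channel() is the new device-tree-aware entry point: it looks a channel up by the name the client device uses in its dma-names property. A hedged sketch of a driver requesting its "rx" channel and falling back to a plain capability match (the fallback policy is the example's own choice, not mandated by the API):

static struct dma_chan *example_get_rx_chan(struct device *dev)
{
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	chan = dma_request_slave_channel(dev, "rx");
	if (chan)
		return chan;

	/* no DT binding found: fall back to any slave-capable channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, NULL, NULL);
}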
include/linux/dw_dmac.h
@@ -14,15 +14,39 @@

 #include <linux/dmaengine.h>

+/**
+ * struct dw_dma_slave - Controller-specific information about a slave
+ *
+ * @dma_dev: required DMA master device. Deprecated.
+ * @bus_id: name of this device channel, not just a device name since
+ *          devices may have more than one channel e.g. "foo_tx"
+ * @cfg_hi: Platform-specific initializer for the CFG_HI register
+ * @cfg_lo: Platform-specific initializer for the CFG_LO register
+ * @src_master: src master for transfers on allocated channel.
+ * @dst_master: dest master for transfers on allocated channel.
+ */
+struct dw_dma_slave {
+	struct device		*dma_dev;
+	const char		*bus_id;
+	u32			cfg_hi;
+	u32			cfg_lo;
+	u8			src_master;
+	u8			dst_master;
+};
+
 /**
  * struct dw_dma_platform_data - Controller configuration parameters
  * @nr_channels: Number of channels supported by hardware (max 8)
  * @is_private: The device channels should be marked as private and not for
  *	use by the general purpose DMA channel allocator.
+ * @chan_allocation_order: Allocate channels starting from 0 or 7
+ * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
  * @block_size: Maximum block size supported by the controller
  * @nr_masters: Number of AHB masters supported by the controller
  * @data_width: Maximum data width supported by hardware per AHB master
  *		(0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
+ * @sd: slave specific data. Used for configuring channels
+ * @sd_count: count of slave data structures passed.
  */
 struct dw_dma_platform_data {
 	unsigned int	nr_channels;
@@ -36,6 +60,9 @@ struct dw_dma_platform_data {
 	unsigned short	block_size;
 	unsigned char	nr_masters;
 	unsigned char	data_width[4];
+
+	struct dw_dma_slave *sd;
+	unsigned int sd_count;
 };

 /* bursts size */
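With the slave descriptions moved into the platform data, board code can hand the controller everything it needs in one structure. A sketch with invented device names and register values (the DWC_CFGH_* helpers are the ones this header already provides):

static struct dw_dma_slave board_dma_slaves[] = {
	{
		.bus_id = "uart0_tx",	/* matched against the requesting channel */
		.cfg_hi = DWC_CFGH_DST_PER(1),
		.src_master = 0,
		.dst_master = 1,
	},
};

static struct dw_dma_platform_data board_dw_dma_pdata = {
	.nr_channels = 8,
	.block_size = 4095,
	.nr_masters = 2,
	.data_width = { 3, 3 },	/* 64-bit data path on both masters */
	.sd = board_dma_slaves,
	.sd_count = ARRAY_SIZE(board_dma_slaves),
};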
@@ -50,23 +77,6 @@ enum dw_dma_msize {
 	DW_DMA_MSIZE_256,
 };

-/**
- * struct dw_dma_slave - Controller-specific information about a slave
- *
- * @dma_dev: required DMA master device
- * @cfg_hi: Platform-specific initializer for the CFG_HI register
- * @cfg_lo: Platform-specific initializer for the CFG_LO register
- * @src_master: src master for transfers on allocated channel.
- * @dst_master: dest master for transfers on allocated channel.
- */
-struct dw_dma_slave {
-	struct device		*dma_dev;
-	u32			cfg_hi;
-	u32			cfg_lo;
-	u8			src_master;
-	u8			dst_master;
-};
-
 /* Platform-configurable bits in CFG_HI */
 #define DWC_CFGH_FCMODE		(1 << 0)
 #define DWC_CFGH_FIFO_MODE	(1 << 1)
@@ -104,5 +114,6 @@ void dw_dma_cyclic_stop(struct dma_chan *chan);
 dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);

 dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
+bool dw_dma_generic_filter(struct dma_chan *chan, void *param);

 #endif /* DW_DMAC_H */
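Exporting dw_dma_generic_filter lets users request a dw_dmac channel through the standard filter mechanism instead of open-coding their own match. A hedged sketch (the exact type of the filter parameter is defined by the dw_dmac driver, so it is passed through opaquely here):

static struct dma_chan *example_get_dw_chan(void *filter_param)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, dw_dma_generic_filter, filter_param);
}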
include/linux/of_dma.h (new file, 74 lines)
@@ -0,0 +1,74 @@
+/*
+ * OF helpers for DMA request / controller
+ *
+ * Based on of_gpio.h
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_OF_DMA_H
+#define __LINUX_OF_DMA_H
+
+#include <linux/of.h>
+#include <linux/dmaengine.h>
+
+struct device_node;
+
+struct of_dma {
+	struct list_head	of_dma_controllers;
+	struct device_node	*of_node;
+	int			of_dma_nbcells;
+	struct dma_chan		*(*of_dma_xlate)
+				(struct of_phandle_args *, struct of_dma *);
+	void			*of_dma_data;
+	int			use_count;
+};
+
+struct of_dma_filter_info {
+	dma_cap_mask_t	dma_cap;
+	dma_filter_fn	filter_fn;
+};
+
+#ifdef CONFIG_OF
+extern int of_dma_controller_register(struct device_node *np,
+		struct dma_chan *(*of_dma_xlate)
+		(struct of_phandle_args *, struct of_dma *),
+		void *data);
+extern int of_dma_controller_free(struct device_node *np);
+extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
+						     char *name);
+extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+		struct of_dma *ofdma);
+#else
+static inline int of_dma_controller_register(struct device_node *np,
+		struct dma_chan *(*of_dma_xlate)
+		(struct of_phandle_args *, struct of_dma *),
+		void *data)
+{
+	return -ENODEV;
+}
+
+static inline int of_dma_controller_free(struct device_node *np)
+{
+	return -ENODEV;
+}
+
+static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
+						     char *name)
+{
+	return NULL;
+}
+
+static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+		struct of_dma *ofdma)
+{
+	return NULL;
+}
+#endif
+
+#endif /* __LINUX_OF_DMA_H */
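The new header gives DMA controller drivers a registration point for the generic DT binding. A hedged sketch of a probe routine pairing of_dma_controller_register() with the stock of_dma_simple_xlate translator, whose data pointer is expected to be a struct of_dma_filter_info (the filter function and all names here are invented):

#include <linux/of_dma.h>
#include <linux/platform_device.h>

static bool example_dma_filter(struct dma_chan *chan, void *param)
{
	/* real drivers match the request id carried in param */
	return true;
}

static struct of_dma_filter_info example_dma_info;

static int example_dma_probe(struct platform_device *pdev)
{
	int ret;

	dma_cap_zero(example_dma_info.dma_cap);
	dma_cap_set(DMA_SLAVE, example_dma_info.dma_cap);
	example_dma_info.filter_fn = example_dma_filter;

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_simple_xlate,
					 &example_dma_info);
	if (ret)
		dev_err(&pdev->dev, "failed to register OF DMA controller\n");

	return ret;
}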
@@ -147,6 +147,16 @@ struct stedma40_chan_cfg {
  * @memcpy_conf_log: default configuration of logical channel memcpy
  * @disabled_channels: A vector, ending with -1, that marks physical channels
  * that are for different reasons not available for the driver.
+ * @soft_lli_chans: A vector that marks physical channels that will use LLI
+ * by SW, which avoids a HW bug that exists in some versions of the
+ * controller. SoftLLI introduces relink overhead that could impact
+ * performance for certain use cases.
+ * @num_of_soft_lli_chans: The number of channels that need to be configured
+ * to use SoftLLI.
+ * @use_esram_lcla: flag for mapping the lcla into esram region
+ * @num_of_phy_chans: The number of physical channels implemented in HW.
+ * 0 means reading the number of channels from DMA HW but this is only valid
+ * for 'multiple of 4' channels, like 8.
  */
 struct stedma40_platform_data {
 	u32 dev_len;
@@ -157,7 +167,10 @@ struct stedma40_platform_data {
 	struct stedma40_chan_cfg *memcpy_conf_phy;
 	struct stedma40_chan_cfg *memcpy_conf_log;
 	int disabled_channels[STEDMA40_MAX_PHYS];
+	int *soft_lli_chans;
+	int num_of_soft_lli_chans;
 	bool use_esram_lcla;
+	int num_of_phy_chans;
 };

 #ifdef CONFIG_STE_DMA40
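A short sketch of how a board file might populate the new fields, opting two physical channels into SoftLLI and letting the driver probe the channel count from hardware (channel numbers are invented):

static int board_soft_lli_chans[] = { 2, 3 };

static struct stedma40_platform_data board_dma40_pdata = {
	.soft_lli_chans = board_soft_lli_chans,
	.num_of_soft_lli_chans = ARRAY_SIZE(board_soft_lli_chans),
	.use_esram_lcla = false,
	.num_of_phy_chans = 0,	/* 0: read channel count from DMA HW */
};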
net/ipv4/tcp.c
@@ -1409,10 +1409,10 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
 		return;

 	last_issued = tp->ucopy.dma_cookie;
-	dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+	dma_async_issue_pending(tp->ucopy.dma_chan);

 	do {
-		if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
+		if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
 					      last_issued, &done,
 					      &used) == DMA_SUCCESS) {
 			/* Safe to free early-copied skbs now */
@@ -1754,7 +1754,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			tcp_service_net_dma(sk, true);
 			tcp_cleanup_rbuf(sk, copied);
 		} else
-			dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+			dma_async_issue_pending(tp->ucopy.dma_chan);
 	}
 #endif
 	if (copied >= target) {
@@ -1847,7 +1847,7 @@ do_prequeue:
 				break;
 			}

-			dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+			dma_async_issue_pending(tp->ucopy.dma_chan);

 			if ((offset + used) == skb->len)
 				copied_early = true;