/*
 * Freescale DMA ALSA SoC PCM driver
 *
 * Author: Timur Tabi <timur@freescale.com>
 *
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 * This driver implements ASoC support for the Elo DMA controller, which is
 * the DMA controller on Freescale 83xx, 85xx, and 86xx SOCs. In ALSA terms,
 * the PCM driver is what handles the DMA buffer.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include <asm/io.h>

#include "fsl_dma.h"
#include "fsl_ssi.h"	/* For the offset of stx0 and srx0 */

/*
 * The formats that the DMA controller supports, which is anything
 * that is 8, 16, or 32 bits.
 */
#define FSLDMA_PCM_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
			    SNDRV_PCM_FMTBIT_U8 | \
			    SNDRV_PCM_FMTBIT_S16_LE | \
			    SNDRV_PCM_FMTBIT_S16_BE | \
			    SNDRV_PCM_FMTBIT_U16_LE | \
			    SNDRV_PCM_FMTBIT_U16_BE | \
			    SNDRV_PCM_FMTBIT_S24_LE | \
			    SNDRV_PCM_FMTBIT_S24_BE | \
			    SNDRV_PCM_FMTBIT_U24_LE | \
			    SNDRV_PCM_FMTBIT_U24_BE | \
			    SNDRV_PCM_FMTBIT_S32_LE | \
			    SNDRV_PCM_FMTBIT_S32_BE | \
			    SNDRV_PCM_FMTBIT_U32_LE | \
			    SNDRV_PCM_FMTBIT_U32_BE)
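
/**
 * dma_object: per-channel driver data for one Elo DMA channel
 *
 * Most fields are filled in by fsl_soc_dma_probe(); @assigned is managed by
 * fsl_dma_open() and fsl_dma_close().
 *
 * @dai: the ASoC platform driver registered for this channel
 * @ssi_stx_phys: bus address of the SSI STX0 register
 * @ssi_srx_phys: bus address of the SSI SRX0 register
 * @ssi_fifo_depth: depth of the SSI FIFOs
 * @channel: pointer to the DMA channel's registers
 * @irq: IRQ for this DMA channel
 * @assigned: true if a substream is currently using this channel
 * @path: full device-tree path of the DMA channel node
 */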
struct dma_object {
	struct snd_soc_platform_driver dai;
	dma_addr_t ssi_stx_phys;
	dma_addr_t ssi_srx_phys;
	unsigned int ssi_fifo_depth;
	struct ccsr_dma_channel __iomem *channel;
	unsigned int irq;
	bool assigned;
	char path[1];
};

/*
 * The number of DMA links to use. Two is the bare minimum, but if you
 * have really small links you might need more.
 */
#define NUM_DMA_LINKS	2

/** fsl_dma_private: per-substream DMA data
 *
 * Each substream has a 1-to-1 association with a DMA channel.
 *
 * The link[] array is first because it needs to be aligned on a 32-byte
 * boundary, so putting it first will ensure alignment without padding the
 * structure.
 *
 * @link[]: array of link descriptors
 * @dma_channel: pointer to the DMA channel's registers
 * @irq: IRQ for this DMA channel
 * @substream: pointer to the substream object, needed by the ISR
 * @ssi_sxx_phys: bus address of the STX or SRX register to use
 * @ssi_fifo_depth: depth of the SSI FIFOs
 * @ld_buf_phys: physical address of the LD buffer
 * @current_link: index into link[] of the link currently being processed
 * @dma_buf_phys: physical address of the DMA buffer
 * @dma_buf_next: physical address of the next period to process
 * @dma_buf_end: physical address of the byte after the end of the DMA buffer
 * @period_size: the size of a single period
 * @num_periods: the number of periods in the DMA buffer
 */
struct fsl_dma_private {
	struct fsl_dma_link_descriptor link[NUM_DMA_LINKS];
	struct ccsr_dma_channel __iomem *dma_channel;
	unsigned int irq;
	struct snd_pcm_substream *substream;
	dma_addr_t ssi_sxx_phys;
	unsigned int ssi_fifo_depth;
	dma_addr_t ld_buf_phys;
	unsigned int current_link;
	dma_addr_t dma_buf_phys;
	dma_addr_t dma_buf_next;
	dma_addr_t dma_buf_end;
	size_t period_size;
	unsigned int num_periods;
};

/**
 * fsl_dma_hardware: define characteristics of the PCM hardware.
 *
 * The PCM hardware is the Freescale DMA controller. This structure defines
 * the capabilities of that hardware.
 *
 * Since the sampling rate and data format are not controlled by the DMA
 * controller, we specify no limits for those values. The only exception is
 * period_bytes_min, which is set to a reasonably low value to prevent the
 * DMA controller from generating too many interrupts per second.
 *
 * Since each link descriptor has a 32-bit byte count field, we set
 * period_bytes_max to the largest 32-bit number. We also have no maximum
 * number of periods.
 *
 * Note that we specify SNDRV_PCM_INFO_JOINT_DUPLEX here, but only because a
 * limitation in the SSI driver requires the sample rates for playback and
 * capture to be the same.
 */
static const struct snd_pcm_hardware fsl_dma_hardware = {

	.info			= SNDRV_PCM_INFO_INTERLEAVED |
				  SNDRV_PCM_INFO_MMAP |
				  SNDRV_PCM_INFO_MMAP_VALID |
				  SNDRV_PCM_INFO_JOINT_DUPLEX |
				  SNDRV_PCM_INFO_PAUSE,
	.formats		= FSLDMA_PCM_FORMATS,
	.period_bytes_min	= 512,		/* A reasonable limit */
	.period_bytes_max	= (u32) -1,
	.periods_min		= NUM_DMA_LINKS,
	.periods_max		= (unsigned int) -1,
	.buffer_bytes_max	= 128 * 1024,	/* A reasonable limit */
};

/**
 * fsl_dma_abort_stream: tell ALSA that the DMA transfer has aborted
 *
 * This function should be called by the ISR whenever the DMA controller
 * halts data transfer.
 */
static void fsl_dma_abort_stream(struct snd_pcm_substream *substream)
{
	unsigned long flags;

	snd_pcm_stream_lock_irqsave(substream, flags);

	if (snd_pcm_running(substream))
		snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);

	snd_pcm_stream_unlock_irqrestore(substream, flags);
}

/**
 * fsl_dma_update_pointers - update LD pointers to point to the next period
 *
 * As each period is completed, this function changes the link
 * descriptor pointers for that period to point to the next period.
 */
static void fsl_dma_update_pointers(struct fsl_dma_private *dma_private)
{
	struct fsl_dma_link_descriptor *link =
		&dma_private->link[dma_private->current_link];

	/* Update our link descriptors to point to the next period. On a 36-bit
	 * system, we also need to update the ESAD bits. We also set (keep) the
	 * snoop bits. See the comments in fsl_dma_hw_params() about snooping.
	 */
	if (dma_private->substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		link->source_addr = cpu_to_be32(dma_private->dma_buf_next);
#ifdef CONFIG_PHYS_64BIT
		link->source_attr = cpu_to_be32(CCSR_DMA_ATR_SNOOP |
			upper_32_bits(dma_private->dma_buf_next));
#endif
	} else {
		link->dest_addr = cpu_to_be32(dma_private->dma_buf_next);
#ifdef CONFIG_PHYS_64BIT
		link->dest_attr = cpu_to_be32(CCSR_DMA_ATR_SNOOP |
			upper_32_bits(dma_private->dma_buf_next));
#endif
	}

	/* Update our variables for next time */
	dma_private->dma_buf_next += dma_private->period_size;

	if (dma_private->dma_buf_next >= dma_private->dma_buf_end)
		dma_private->dma_buf_next = dma_private->dma_buf_phys;

	if (++dma_private->current_link >= NUM_DMA_LINKS)
		dma_private->current_link = 0;
}

/**
 * fsl_dma_isr: interrupt handler for the DMA controller
 *
 * @irq: IRQ of the DMA channel
 * @dev_id: pointer to the dma_private structure for this DMA channel
 */
static irqreturn_t fsl_dma_isr(int irq, void *dev_id)
{
	struct fsl_dma_private *dma_private = dev_id;
	struct snd_pcm_substream *substream = dma_private->substream;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct device *dev = rtd->platform->dev;
	struct ccsr_dma_channel __iomem *dma_channel = dma_private->dma_channel;
	irqreturn_t ret = IRQ_NONE;
	u32 sr, sr2 = 0;

	/* We got an interrupt, so read the status register to see what we
	   were interrupted for.
	 */
	sr = in_be32(&dma_channel->sr);
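
	/*
	 * TE (transfer error) and PE (programming error) abort the stream,
	 * EOSI (end of segment) marks a completed period, and the remaining
	 * recognized bits are simply cleared and/or reported as handled.
	 */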

	if (sr & CCSR_DMA_SR_TE) {
		dev_err(dev, "dma transmit error\n");
		fsl_dma_abort_stream(substream);
		sr2 |= CCSR_DMA_SR_TE;
		ret = IRQ_HANDLED;
	}

	if (sr & CCSR_DMA_SR_CH)
		ret = IRQ_HANDLED;

	if (sr & CCSR_DMA_SR_PE) {
		dev_err(dev, "dma programming error\n");
		fsl_dma_abort_stream(substream);
		sr2 |= CCSR_DMA_SR_PE;
		ret = IRQ_HANDLED;
	}

	if (sr & CCSR_DMA_SR_EOLNI) {
		sr2 |= CCSR_DMA_SR_EOLNI;
		ret = IRQ_HANDLED;
	}

	if (sr & CCSR_DMA_SR_CB)
		ret = IRQ_HANDLED;

	if (sr & CCSR_DMA_SR_EOSI) {
		/* Tell ALSA we completed a period. */
		snd_pcm_period_elapsed(substream);

		/*
		 * Update our link descriptors to point to the next period. We
		 * only need to do this if the number of periods is not equal
		 * to the number of links.
		 */
		if (dma_private->num_periods != NUM_DMA_LINKS)
			fsl_dma_update_pointers(dma_private);

		sr2 |= CCSR_DMA_SR_EOSI;
		ret = IRQ_HANDLED;
	}

	if (sr & CCSR_DMA_SR_EOLSI) {
		sr2 |= CCSR_DMA_SR_EOLSI;
		ret = IRQ_HANDLED;
	}

	/* Clear the bits that we set */
	if (sr2)
		out_be32(&dma_channel->sr, sr2);

	return ret;
}

/**
 * fsl_dma_new: initialize this PCM driver.
 *
 * This function is called when the codec driver calls snd_soc_new_pcms(),
 * once for each .dai_link in the machine driver's snd_soc_card
 * structure.
 *
 * snd_dma_alloc_pages() is just a front-end to dma_alloc_coherent(), which
 * (currently) always allocates the DMA buffer in lowmem, even if GFP_HIGHMEM
 * is specified. Therefore, any DMA buffers we allocate will always be in low
 * memory, but we support 36-bit physical addresses anyway.
 *
 * Regardless of where the memory is actually allocated, since the device can
 * technically DMA to any 36-bit address, we do need to set the DMA mask to 36.
 */
static int fsl_dma_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	int ret;

	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(36));
	if (ret)
		return ret;

	/* Some codecs have separate DAIs for playback and capture, so we
	 * should allocate a DMA buffer only for the streams that are valid.
	 */

	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
		ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev,
			fsl_dma_hardware.buffer_bytes_max,
			&pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->dma_buffer);
		if (ret) {
			dev_err(card->dev, "can't alloc playback dma buffer\n");
			return ret;
		}
	}

	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
		ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev,
			fsl_dma_hardware.buffer_bytes_max,
			&pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->dma_buffer);
		if (ret) {
			dev_err(card->dev, "can't alloc capture dma buffer\n");
			snd_dma_free_pages(&pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->dma_buffer);
			return ret;
		}
	}

	return 0;
}

/**
 * fsl_dma_open: open a new substream.
 *
 * Each substream has its own DMA buffer.
 *
 * ALSA divides the DMA buffer into N periods. We create NUM_DMA_LINKS link
 * descriptors that ping-pong from one period to the next. For example, if
 * there are six periods and two link descriptors, this is how they look
 * before playback starts:
 *
 *                 The last link descriptor
 *   ____________  points back to the first
 *  |            |
 *  V            |
 *  ___    ___   |
 * |   |->|   |->|
 * |___|  |___|
 *   |      |
 *   |      |
 *   V      V
 *  _________________________________________
 * |      |      |      |      |      |      |  The DMA buffer is
 * |      |      |      |      |      |      |    divided into 6 parts
 * |______|______|______|______|______|______|
 *
 * and here's how they look after the first period is finished playing:
 *
 *   ____________
 *  |            |
 *  V            |
 *  ___    ___   |
 * |   |->|   |->|
 * |___|  |___|
 *   |      |
 *   |______________
 *          |       |
 *          V       V
 *  _________________________________________
 * |      |      |      |      |      |      |
 * |      |      |      |      |      |      |
 * |______|______|______|______|______|______|
 *
 * The first link descriptor now points to the third period. The DMA
 * controller is currently playing the second period. When it finishes, it
 * will jump back to the first descriptor and play the third period.
 *
 * There are four reasons we do this:
 *
 * 1. The only way to get the DMA controller to automatically restart the
 *    transfer when it gets to the end of the buffer is to use chaining
 *    mode. Basic direct mode doesn't offer that feature.
 * 2. We need to receive an interrupt at the end of every period. The DMA
 *    controller can generate an interrupt at the end of every link transfer
 *    (aka segment). Making each period into a DMA segment will give us the
 *    interrupts we need.
 * 3. By creating only two link descriptors, regardless of the number of
 *    periods, we do not need to reallocate the link descriptors if the
 *    number of periods changes.
 * 4. All of the audio data is still stored in a single, contiguous DMA
 *    buffer, which is what ALSA expects. We're just dividing it into
 *    contiguous parts, and creating a link descriptor for each one.
 */
static int fsl_dma_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct device *dev = rtd->platform->dev;
	struct dma_object *dma =
		container_of(rtd->platform->driver, struct dma_object, dai);
	struct fsl_dma_private *dma_private;
	struct ccsr_dma_channel __iomem *dma_channel;
	dma_addr_t ld_buf_phys;
	u64 temp_link;		/* Pointer to next link descriptor */
	u32 mr;
	unsigned int channel;
	int ret = 0;
	unsigned int i;

	/*
	 * Reject any DMA buffer whose size is not a multiple of the period
	 * size. We need to make sure that the DMA buffer can be evenly divided
	 * into periods.
	 */
	ret = snd_pcm_hw_constraint_integer(runtime,
		SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0) {
		dev_err(dev, "invalid buffer size\n");
		return ret;
	}

	channel = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? 0 : 1;

	if (dma->assigned) {
		dev_err(dev, "dma channel already assigned\n");
		return -EBUSY;
	}

	dma_private = dma_alloc_coherent(dev, sizeof(struct fsl_dma_private),
					 &ld_buf_phys, GFP_KERNEL);
	if (!dma_private) {
		dev_err(dev, "can't allocate dma private data\n");
		return -ENOMEM;
	}
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		dma_private->ssi_sxx_phys = dma->ssi_stx_phys;
	else
		dma_private->ssi_sxx_phys = dma->ssi_srx_phys;

	dma_private->ssi_fifo_depth = dma->ssi_fifo_depth;
	dma_private->dma_channel = dma->channel;
	dma_private->irq = dma->irq;
	dma_private->substream = substream;
	dma_private->ld_buf_phys = ld_buf_phys;
	dma_private->dma_buf_phys = substream->dma_buffer.addr;

	ret = request_irq(dma_private->irq, fsl_dma_isr, 0, "fsldma-audio",
			  dma_private);
	if (ret) {
		dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
			dma_private->irq, ret);
		dma_free_coherent(dev, sizeof(struct fsl_dma_private),
			dma_private, dma_private->ld_buf_phys);
		return ret;
	}

	dma->assigned = 1;

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
	snd_soc_set_runtime_hwparams(substream, &fsl_dma_hardware);
	runtime->private_data = dma_private;

	/* Program the fixed DMA controller parameters */

	dma_channel = dma_private->dma_channel;

	temp_link = dma_private->ld_buf_phys +
		sizeof(struct fsl_dma_link_descriptor);

	for (i = 0; i < NUM_DMA_LINKS; i++) {
		dma_private->link[i].next = cpu_to_be64(temp_link);

		temp_link += sizeof(struct fsl_dma_link_descriptor);
	}
	/* The last link descriptor points to the first */
	dma_private->link[i - 1].next = cpu_to_be64(dma_private->ld_buf_phys);

	/* Tell the DMA controller where the first link descriptor is */
	out_be32(&dma_channel->clndar,
		CCSR_DMA_CLNDAR_ADDR(dma_private->ld_buf_phys));
	out_be32(&dma_channel->eclndar,
		CCSR_DMA_ECLNDAR_ADDR(dma_private->ld_buf_phys));

	/* The manual says the BCR must be clear before enabling EMP */
	out_be32(&dma_channel->bcr, 0);

	/*
	 * Program the mode register for interrupts, external master control,
	 * and source/destination hold. Also clear the Channel Abort bit.
	 */
	mr = in_be32(&dma_channel->mr) &
		~(CCSR_DMA_MR_CA | CCSR_DMA_MR_DAHE | CCSR_DMA_MR_SAHE);

	/*
	 * We want External Master Start and External Master Pause enabled,
	 * because the SSI is controlling the DMA controller. We want the DMA
	 * controller to be set up in advance, and then we signal only the SSI
	 * to start transferring.
	 *
	 * We want End-Of-Segment Interrupts enabled, because this will generate
	 * an interrupt at the end of each segment (each link descriptor
	 * represents one segment). Each DMA segment is the same thing as an
	 * ALSA period, so this is how we get an interrupt at the end of every
	 * period.
	 *
	 * We want Error Interrupt enabled, so that we can get an error if
	 * the DMA controller is mis-programmed somehow.
	 */
	mr |= CCSR_DMA_MR_EOSIE | CCSR_DMA_MR_EIE | CCSR_DMA_MR_EMP_EN |
		CCSR_DMA_MR_EMS_EN;

	/* For playback, we want the destination address to be held. For
	   capture, set the source address to be held. */
	mr |= (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
		CCSR_DMA_MR_DAHE : CCSR_DMA_MR_SAHE;

	out_be32(&dma_channel->mr, mr);

	return 0;
}

/**
 * fsl_dma_hw_params: continue initializing the DMA links
 *
 * This function obtains hardware parameters about the opened stream and
 * programs the DMA controller accordingly.
 *
 * One drawback of big-endian is that when copying integers of different
 * sizes to a fixed-sized register, the address to which the integer must be
 * copied is dependent on the size of the integer.
 *
 * For example, if P is the address of a 32-bit register, and X is a 32-bit
 * integer, then X should be copied to address P. However, if X is a 16-bit
 * integer, then it should be copied to P+2. If X is an 8-bit integer,
 * then it should be copied to P+3.
 *
 * So for playback of 8-bit samples, the DMA controller must transfer single
 * bytes from the DMA buffer to the last byte of the STX0 register, i.e.
 * offset by 3 bytes. For 16-bit samples, the offset is two bytes.
 *
 * For 24-bit samples, the offset is 1 byte. However, the DMA controller
 * does not support 3-byte copies (the DAHTS register supports only 1, 2, 4,
 * and 8 bytes at a time). So we do not support packed 24-bit samples.
 * 24-bit data must be padded to 32 bits.
 */
static int fsl_dma_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *hw_params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct fsl_dma_private *dma_private = runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct device *dev = rtd->platform->dev;

	/* Number of bits per sample */
	unsigned int sample_bits =
		snd_pcm_format_physical_width(params_format(hw_params));

	/* Number of bytes per sample */
	unsigned int sample_bytes = sample_bits / 8;

	/* Bus address of SSI STX register */
	dma_addr_t ssi_sxx_phys = dma_private->ssi_sxx_phys;

	/* Size of the DMA buffer, in bytes */
	size_t buffer_size = params_buffer_bytes(hw_params);

	/* Number of bytes per period */
	size_t period_size = params_period_bytes(hw_params);

	/* Pointer to next period */
	dma_addr_t temp_addr = substream->dma_buffer.addr;

	/* Pointer to DMA controller */
	struct ccsr_dma_channel __iomem *dma_channel = dma_private->dma_channel;

	u32 mr; /* DMA Mode Register */

	unsigned int i;

	/* Initialize our DMA tracking variables */
	dma_private->period_size = period_size;
	dma_private->num_periods = params_periods(hw_params);
	dma_private->dma_buf_end = dma_private->dma_buf_phys + buffer_size;
	dma_private->dma_buf_next = dma_private->dma_buf_phys +
		(NUM_DMA_LINKS * period_size);

	if (dma_private->dma_buf_next >= dma_private->dma_buf_end)
		/* This happens if the number of periods == NUM_DMA_LINKS */
		dma_private->dma_buf_next = dma_private->dma_buf_phys;

	mr = in_be32(&dma_channel->mr) & ~(CCSR_DMA_MR_BWC_MASK |
		  CCSR_DMA_MR_SAHTS_MASK | CCSR_DMA_MR_DAHTS_MASK);

	/* Due to a quirk of the SSI's STX register, the target address
	 * for the DMA operations depends on the sample size. So we calculate
	 * that offset here. While we're at it, also tell the DMA controller
	 * how much data to transfer per sample.
	 */
	switch (sample_bits) {
	case 8:
		mr |= CCSR_DMA_MR_DAHTS_1 | CCSR_DMA_MR_SAHTS_1;
		ssi_sxx_phys += 3;
		break;
	case 16:
		mr |= CCSR_DMA_MR_DAHTS_2 | CCSR_DMA_MR_SAHTS_2;
		ssi_sxx_phys += 2;
		break;
	case 32:
		mr |= CCSR_DMA_MR_DAHTS_4 | CCSR_DMA_MR_SAHTS_4;
		break;
	default:
		/* We should never get here */
		dev_err(dev, "unsupported sample size %u\n", sample_bits);
		return -EINVAL;
	}

	/*
	 * BWC determines how many bytes are sent/received before the DMA
	 * controller checks the SSI to see if it needs to stop. BWC should
	 * always be a multiple of the frame size, so that we always transmit
	 * whole frames. Each frame occupies two slots in the FIFO. The
	 * parameter for CCSR_DMA_MR_BWC() is rounded down to the next power
	 * of two (MR[BWC] can only represent even powers of two).
	 *
	 * To simplify the process, we set BWC to the largest value that is
	 * less than or equal to the FIFO watermark. For playback, this ensures
	 * that we transfer the maximum amount without overrunning the FIFO.
	 * For capture, this ensures that we transfer the maximum amount without
	 * underrunning the FIFO.
	 *
	 * f = SSI FIFO depth
	 * w = SSI watermark value (which equals f - 2)
	 * b = DMA bandwidth count (in bytes)
	 * s = sample size (in bytes; each frame is two samples, one per
	 *     channel)
	 *
	 * For playback, we never transmit more than the transmit FIFO
	 * watermark, otherwise we might write more data than the FIFO can hold.
	 * The watermark is equal to the FIFO depth minus two.
	 *
	 * For capture, two equations must hold:
	 *	w > f - (b / s)
	 *	w >= b / s
	 *
	 * So, b > 2 * s, but b must also be <= s * w. To simplify, we set
	 * b = s * w, which is equal to
	 *	(dma_private->ssi_fifo_depth - 2) * sample_bytes.
	 */
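	/*
	 * For example, with the default FIFO depth of 8 (see
	 * fsl_soc_dma_probe()) the watermark is 8 - 2 = 6, so for 16-bit
	 * samples (2 bytes each) we request a bandwidth count of
	 * 6 * 2 = 12 bytes, which CCSR_DMA_MR_BWC() rounds down to 8 bytes.
	 */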
	mr |= CCSR_DMA_MR_BWC((dma_private->ssi_fifo_depth - 2) * sample_bytes);

	out_be32(&dma_channel->mr, mr);

	for (i = 0; i < NUM_DMA_LINKS; i++) {
		struct fsl_dma_link_descriptor *link = &dma_private->link[i];

		link->count = cpu_to_be32(period_size);

		/* The snoop bit tells the DMA controller whether it should tell
		 * the ECM to snoop during a read or write to an address. For
		 * audio, we use DMA to transfer data between memory and an I/O
		 * device (the SSI's STX0 or SRX0 register). Snooping is only
		 * needed if there is a cache, so we need to snoop memory
		 * addresses only. For playback, that means we snoop the source
		 * but not the destination. For capture, we snoop the
		 * destination but not the source.
		 *
		 * Note that failing to snoop properly is unlikely to cause
		 * cache incoherency if the period size is larger than the
		 * size of L1 cache. This is because filling in one period will
		 * flush out the data for the previous period. So if you
		 * increased period_bytes_min to a large enough size, you might
		 * get more performance by not snooping, and you'll still be
		 * okay. You'll need to update fsl_dma_update_pointers() also.
		 */
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			link->source_addr = cpu_to_be32(temp_addr);
			link->source_attr = cpu_to_be32(CCSR_DMA_ATR_SNOOP |
				upper_32_bits(temp_addr));

			link->dest_addr = cpu_to_be32(ssi_sxx_phys);
			link->dest_attr = cpu_to_be32(CCSR_DMA_ATR_NOSNOOP |
				upper_32_bits(ssi_sxx_phys));
		} else {
			link->source_addr = cpu_to_be32(ssi_sxx_phys);
			link->source_attr = cpu_to_be32(CCSR_DMA_ATR_NOSNOOP |
				upper_32_bits(ssi_sxx_phys));

			link->dest_addr = cpu_to_be32(temp_addr);
			link->dest_attr = cpu_to_be32(CCSR_DMA_ATR_SNOOP |
				upper_32_bits(temp_addr));
		}

		temp_addr += period_size;
	}

	return 0;
}

/**
 * fsl_dma_pointer: determine the current position of the DMA transfer
 *
 * This function is called by ALSA when ALSA wants to know where in the
 * stream buffer the hardware currently is.
 *
 * For playback, the SAR register contains the physical address of the most
 * recent DMA transfer. For capture, the value is in the DAR register.
 *
 * The base address of the buffer is stored in the source_addr field of the
 * first link descriptor.
 */
static snd_pcm_uframes_t fsl_dma_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct fsl_dma_private *dma_private = runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct device *dev = rtd->platform->dev;
	struct ccsr_dma_channel __iomem *dma_channel = dma_private->dma_channel;
	dma_addr_t position;
	snd_pcm_uframes_t frames;

	/* Obtain the current DMA pointer, but don't read the ESAD bits if we
	 * only have 32-bit DMA addresses. This function is typically called
	 * in interrupt context, so we need to optimize it.
	 */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		position = in_be32(&dma_channel->sar);
#ifdef CONFIG_PHYS_64BIT
		position |= (u64)(in_be32(&dma_channel->satr) &
				  CCSR_DMA_ATR_ESAD_MASK) << 32;
#endif
	} else {
		position = in_be32(&dma_channel->dar);
#ifdef CONFIG_PHYS_64BIT
		position |= (u64)(in_be32(&dma_channel->datr) &
				  CCSR_DMA_ATR_ESAD_MASK) << 32;
#endif
	}

	/*
	 * When capture is started, the SSI immediately starts to fill its FIFO.
	 * This means that the DMA controller is not started until the FIFO is
	 * full. However, ALSA calls this function before that happens, when
	 * the DAR register is still zero. In this case, just return zero to
	 * indicate that nothing has been received yet.
	 */
	if (!position)
		return 0;

	if ((position < dma_private->dma_buf_phys) ||
	    (position > dma_private->dma_buf_end)) {
		dev_err(dev, "dma pointer is out of range, halting stream\n");
		return SNDRV_PCM_POS_XRUN;
	}

	frames = bytes_to_frames(runtime, position - dma_private->dma_buf_phys);

	/*
	 * If the current address is just past the end of the buffer, wrap it
	 * around.
	 */
	if (frames == runtime->buffer_size)
		frames = 0;

	return frames;
}

/**
 * fsl_dma_hw_free: release resources allocated in fsl_dma_hw_params()
 *
 * Release the resources allocated in fsl_dma_hw_params() and de-program the
 * registers.
 *
 * This function can be called multiple times.
 */
static int fsl_dma_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct fsl_dma_private *dma_private = runtime->private_data;

	if (dma_private) {
		struct ccsr_dma_channel __iomem *dma_channel;

		dma_channel = dma_private->dma_channel;

		/* Stop the DMA */
		out_be32(&dma_channel->mr, CCSR_DMA_MR_CA);
		out_be32(&dma_channel->mr, 0);

		/* Reset all the other registers */
		out_be32(&dma_channel->sr, -1);
		out_be32(&dma_channel->clndar, 0);
		out_be32(&dma_channel->eclndar, 0);
		out_be32(&dma_channel->satr, 0);
		out_be32(&dma_channel->sar, 0);
		out_be32(&dma_channel->datr, 0);
		out_be32(&dma_channel->dar, 0);
		out_be32(&dma_channel->bcr, 0);
		out_be32(&dma_channel->nlndar, 0);
		out_be32(&dma_channel->enlndar, 0);
	}

	return 0;
}

/**
 * fsl_dma_close: close the stream.
 */
static int fsl_dma_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct fsl_dma_private *dma_private = runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct device *dev = rtd->platform->dev;
	struct dma_object *dma =
		container_of(rtd->platform->driver, struct dma_object, dai);

	if (dma_private) {
		if (dma_private->irq)
			free_irq(dma_private->irq, dma_private);

		/* Deallocate the fsl_dma_private structure */
		dma_free_coherent(dev, sizeof(struct fsl_dma_private),
				  dma_private, dma_private->ld_buf_phys);
		substream->runtime->private_data = NULL;
	}

	dma->assigned = 0;

	return 0;
}

/*
 * Free the per-stream DMA buffers that were allocated in fsl_dma_new().
 */
static void fsl_dma_free_dma_buffers(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pcm->streams); i++) {
		substream = pcm->streams[i].substream;
		if (substream) {
			snd_dma_free_pages(&substream->dma_buffer);
			substream->dma_buffer.area = NULL;
			substream->dma_buffer.addr = 0;
		}
	}
}

/**
 * find_ssi_node -- returns the SSI node that points to the given DMA channel node
 *
 * Although this DMA driver attempts to operate independently of the other
 * devices, it still needs to determine some information about the SSI device
 * that it's working with. Unfortunately, the device tree does not contain
 * a pointer from the DMA channel node to the SSI node -- the pointer goes the
 * other way. So we need to scan the device tree for SSI nodes until we find
 * the one that points to the given DMA channel node. It's ugly, but at least
 * it's contained in this one function.
 */
static struct device_node *find_ssi_node(struct device_node *dma_channel_np)
{
	struct device_node *ssi_np, *np;

	for_each_compatible_node(ssi_np, NULL, "fsl,mpc8610-ssi") {
		/* Check each DMA phandle to see if it points to us. We
		 * assume that device_node pointers are a valid comparison.
		 */
		np = of_parse_phandle(ssi_np, "fsl,playback-dma", 0);
		of_node_put(np);
		if (np == dma_channel_np)
			return ssi_np;

		np = of_parse_phandle(ssi_np, "fsl,capture-dma", 0);
		of_node_put(np);
		if (np == dma_channel_np)
			return ssi_np;
	}

	return NULL;
}
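
/*
 * Note that there is no .trigger op: the DMA channel is programmed in
 * advance and the SSI starts and pauses it through the external master
 * start/pause controls enabled in fsl_dma_open().
 */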
static struct snd_pcm_ops fsl_dma_ops = {
	.open		= fsl_dma_open,
	.close		= fsl_dma_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= fsl_dma_hw_params,
	.hw_free	= fsl_dma_hw_free,
	.pointer	= fsl_dma_pointer,
};

static int fsl_soc_dma_probe(struct platform_device *pdev)
{
	struct dma_object *dma;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *ssi_np;
	struct resource res;
	const uint32_t *iprop;
	int ret;

	/* Find the SSI node that points to us. */
	ssi_np = find_ssi_node(np);
	if (!ssi_np) {
		dev_err(&pdev->dev, "cannot find parent SSI node\n");
		return -ENODEV;
	}

	ret = of_address_to_resource(ssi_np, 0, &res);
	if (ret) {
		dev_err(&pdev->dev, "could not determine resources for %s\n",
			ssi_np->full_name);
		of_node_put(ssi_np);
		return ret;
	}
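
	/*
	 * Allocate the dma_object along with enough extra room to copy the
	 * node's full path into the path[] array at the end of the structure.
	 */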
	dma = kzalloc(sizeof(*dma) + strlen(np->full_name), GFP_KERNEL);
	if (!dma) {
		dev_err(&pdev->dev, "could not allocate dma object\n");
		of_node_put(ssi_np);
		return -ENOMEM;
	}

	strcpy(dma->path, np->full_name);
	dma->dai.ops = &fsl_dma_ops;
	dma->dai.pcm_new = fsl_dma_new;
	dma->dai.pcm_free = fsl_dma_free_dma_buffers;

	/* Store the SSI-specific information that we need */
	dma->ssi_stx_phys = res.start + offsetof(struct ccsr_ssi, stx0);
	dma->ssi_srx_phys = res.start + offsetof(struct ccsr_ssi, srx0);

	iprop = of_get_property(ssi_np, "fsl,fifo-depth", NULL);
	if (iprop)
		dma->ssi_fifo_depth = be32_to_cpup(iprop);
	else
		/* Older 8610 DTs didn't have the fifo-depth property */
		dma->ssi_fifo_depth = 8;

	of_node_put(ssi_np);

	ret = snd_soc_register_platform(&pdev->dev, &dma->dai);
	if (ret) {
		dev_err(&pdev->dev, "could not register platform\n");
		kfree(dma);
		return ret;
	}
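
	/*
	 * Map the DMA channel registers and IRQ now; fsl_dma_open() hands
	 * them to each substream that uses this channel.
	 */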
	dma->channel = of_iomap(np, 0);
	dma->irq = irq_of_parse_and_map(np, 0);

	dev_set_drvdata(&pdev->dev, dma);

	return 0;
}

static int fsl_soc_dma_remove(struct platform_device *pdev)
{
	struct dma_object *dma = dev_get_drvdata(&pdev->dev);

	snd_soc_unregister_platform(&pdev->dev);
	iounmap(dma->channel);
	irq_dispose_mapping(dma->irq);
	kfree(dma);

	return 0;
}
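
/*
 * This driver binds to the individual DMA channel nodes; find_ssi_node()
 * then works out which SSI uses a given channel by following the SSI's
 * fsl,playback-dma and fsl,capture-dma phandles back to it.
 */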
static const struct of_device_id fsl_soc_dma_ids[] = {
	{ .compatible = "fsl,ssi-dma-channel", },
	{}
};
MODULE_DEVICE_TABLE(of, fsl_soc_dma_ids);

static struct platform_driver fsl_soc_dma_driver = {
	.driver = {
		.name = "fsl-pcm-audio",
		.owner = THIS_MODULE,
		.of_match_table = fsl_soc_dma_ids,
	},
	.probe = fsl_soc_dma_probe,
	.remove = fsl_soc_dma_remove,
};

module_platform_driver(fsl_soc_dma_driver);

MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
MODULE_DESCRIPTION("Freescale Elo DMA ASoC PCM Driver");
MODULE_LICENSE("GPL v2");