forked from Minki/linux
mtd: nand: pxa3xx-nand: switch to dmaengine
Now that the pxa architecture has a dmaengine driver, remove the accesses to the direct DMA registers in favor of the more generic dmaengine code. This should also be applicable to mmp and orion, provided they work in a device-tree environment. This patch also removes the previous hack that was necessary to make the driver work in a devicetree environment. Signed-off-by: Robert Jarzmik <robert.jarzmik@free.fr> Reviewed-by: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar> Tested-by: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar> [Brian: fixup use of 'enum dma_transfer_direction'] Signed-off-by: Brian Norris <computersforpeace@gmail.com>
This commit is contained in:
parent
e1305df128
commit
8f5ba31aa5
@ -15,7 +15,9 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma/pxa-dma.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/mtd/mtd.h>
|
||||
@ -33,10 +35,6 @@
|
||||
#define ARCH_HAS_DMA
|
||||
#endif
|
||||
|
||||
#ifdef ARCH_HAS_DMA
|
||||
#include <mach/dma.h>
|
||||
#endif
|
||||
|
||||
#include <linux/platform_data/mtd-nand-pxa3xx.h>
|
||||
|
||||
#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
|
||||
@ -201,6 +199,10 @@ struct pxa3xx_nand_info {
|
||||
unsigned int oob_buff_pos;
|
||||
|
||||
/* DMA information */
|
||||
struct scatterlist sg;
|
||||
enum dma_data_direction dma_dir;
|
||||
struct dma_chan *dma_chan;
|
||||
dma_cookie_t dma_cookie;
|
||||
int drcmr_dat;
|
||||
int drcmr_cmd;
|
||||
|
||||
@ -208,8 +210,6 @@ struct pxa3xx_nand_info {
|
||||
unsigned char *oob_buff;
|
||||
dma_addr_t data_buff_phys;
|
||||
int data_dma_ch;
|
||||
struct pxa_dma_desc *data_desc;
|
||||
dma_addr_t data_desc_addr;
|
||||
|
||||
struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
|
||||
unsigned int state;
|
||||
@ -492,6 +492,9 @@ static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
|
||||
ndcr &= ~NDCR_ND_RUN;
|
||||
nand_writel(info, NDCR, ndcr);
|
||||
}
|
||||
if (info->dma_chan)
|
||||
dmaengine_terminate_all(info->dma_chan);
|
||||
|
||||
/* clear status bits */
|
||||
nand_writel(info, NDSR, NDSR_MASK);
|
||||
}
|
||||
@ -583,57 +586,61 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
|
||||
info->data_size -= do_bytes;
|
||||
}
|
||||
|
||||
#ifdef ARCH_HAS_DMA
|
||||
static void pxa3xx_nand_data_dma_irq(void *data)
|
||||
{
|
||||
struct pxa3xx_nand_info *info = data;
|
||||
struct dma_tx_state state;
|
||||
enum dma_status status;
|
||||
|
||||
status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
|
||||
if (likely(status == DMA_COMPLETE)) {
|
||||
info->state = STATE_DMA_DONE;
|
||||
} else {
|
||||
dev_err(&info->pdev->dev, "DMA error on data channel\n");
|
||||
info->retcode = ERR_DMABUSERR;
|
||||
}
|
||||
dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
|
||||
|
||||
nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
|
||||
enable_int(info, NDCR_INT_MASK);
|
||||
}
|
||||
|
||||
static void start_data_dma(struct pxa3xx_nand_info *info)
|
||||
{
|
||||
struct pxa_dma_desc *desc = info->data_desc;
|
||||
int dma_len = ALIGN(info->data_size + info->oob_size, 32);
|
||||
|
||||
desc->ddadr = DDADR_STOP;
|
||||
desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
|
||||
enum dma_transfer_direction direction;
|
||||
struct dma_async_tx_descriptor *tx;
|
||||
|
||||
switch (info->state) {
|
||||
case STATE_DMA_WRITING:
|
||||
desc->dsadr = info->data_buff_phys;
|
||||
desc->dtadr = info->mmio_phys + NDDB;
|
||||
desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
|
||||
info->dma_dir = DMA_TO_DEVICE;
|
||||
direction = DMA_MEM_TO_DEV;
|
||||
break;
|
||||
case STATE_DMA_READING:
|
||||
desc->dtadr = info->data_buff_phys;
|
||||
desc->dsadr = info->mmio_phys + NDDB;
|
||||
desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
|
||||
info->dma_dir = DMA_FROM_DEVICE;
|
||||
direction = DMA_DEV_TO_MEM;
|
||||
break;
|
||||
default:
|
||||
dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
|
||||
info->state);
|
||||
BUG();
|
||||
}
|
||||
info->sg.length = info->data_size +
|
||||
(info->oob_size ? info->spare_size + info->ecc_size : 0);
|
||||
dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
|
||||
|
||||
DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
|
||||
DDADR(info->data_dma_ch) = info->data_desc_addr;
|
||||
DCSR(info->data_dma_ch) |= DCSR_RUN;
|
||||
}
|
||||
|
||||
static void pxa3xx_nand_data_dma_irq(int channel, void *data)
|
||||
{
|
||||
struct pxa3xx_nand_info *info = data;
|
||||
uint32_t dcsr;
|
||||
|
||||
dcsr = DCSR(channel);
|
||||
DCSR(channel) = dcsr;
|
||||
|
||||
if (dcsr & DCSR_BUSERR) {
|
||||
info->retcode = ERR_DMABUSERR;
|
||||
tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
|
||||
DMA_PREP_INTERRUPT);
|
||||
if (!tx) {
|
||||
dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
|
||||
return;
|
||||
}
|
||||
|
||||
info->state = STATE_DMA_DONE;
|
||||
enable_int(info, NDCR_INT_MASK);
|
||||
nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
|
||||
tx->callback = pxa3xx_nand_data_dma_irq;
|
||||
tx->callback_param = info;
|
||||
info->dma_cookie = dmaengine_submit(tx);
|
||||
dma_async_issue_pending(info->dma_chan);
|
||||
dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
|
||||
__func__, direction, info->dma_cookie, info->sg.length);
|
||||
}
|
||||
#else
|
||||
static void start_data_dma(struct pxa3xx_nand_info *info)
|
||||
{}
|
||||
#endif
|
||||
|
||||
static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
|
||||
{
|
||||
@ -1319,36 +1326,50 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef ARCH_HAS_DMA
|
||||
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
|
||||
{
|
||||
struct platform_device *pdev = info->pdev;
|
||||
int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
|
||||
struct dma_slave_config config;
|
||||
dma_cap_mask_t mask;
|
||||
struct pxad_param param;
|
||||
int ret;
|
||||
|
||||
if (use_dma == 0) {
|
||||
info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
|
||||
if (info->data_buff == NULL)
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
}
|
||||
|
||||
info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
|
||||
&info->data_buff_phys, GFP_KERNEL);
|
||||
if (info->data_buff == NULL) {
|
||||
dev_err(&pdev->dev, "failed to allocate dma buffer\n");
|
||||
info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
|
||||
if (info->data_buff == NULL)
|
||||
return -ENOMEM;
|
||||
if (use_dma == 0)
|
||||
return 0;
|
||||
|
||||
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
sg_init_one(&info->sg, info->data_buff, info->buf_size);
|
||||
dma_cap_zero(mask);
|
||||
dma_cap_set(DMA_SLAVE, mask);
|
||||
param.prio = PXAD_PRIO_LOWEST;
|
||||
param.drcmr = info->drcmr_dat;
|
||||
info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
|
||||
¶m, &pdev->dev,
|
||||
"data");
|
||||
if (!info->dma_chan) {
|
||||
dev_err(&pdev->dev, "unable to request data dma channel\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
info->data_desc = (void *)info->data_buff + data_desc_offset;
|
||||
info->data_desc_addr = info->data_buff_phys + data_desc_offset;
|
||||
|
||||
info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
|
||||
pxa3xx_nand_data_dma_irq, info);
|
||||
if (info->data_dma_ch < 0) {
|
||||
dev_err(&pdev->dev, "failed to request data dma\n");
|
||||
dma_free_coherent(&pdev->dev, info->buf_size,
|
||||
info->data_buff, info->data_buff_phys);
|
||||
return info->data_dma_ch;
|
||||
memset(&config, 0, sizeof(config));
|
||||
config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
||||
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
||||
config.src_addr = info->mmio_phys + NDDB;
|
||||
config.dst_addr = info->mmio_phys + NDDB;
|
||||
config.src_maxburst = 32;
|
||||
config.dst_maxburst = 32;
|
||||
ret = dmaengine_slave_config(info->dma_chan, &config);
|
||||
if (ret < 0) {
|
||||
dev_err(&info->pdev->dev,
|
||||
"dma channel configuration failed: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1361,29 +1382,12 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
|
||||
|
||||
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
|
||||
{
|
||||
struct platform_device *pdev = info->pdev;
|
||||
if (info->use_dma) {
|
||||
pxa_free_dma(info->data_dma_ch);
|
||||
dma_free_coherent(&pdev->dev, info->buf_size,
|
||||
info->data_buff, info->data_buff_phys);
|
||||
} else {
|
||||
kfree(info->data_buff);
|
||||
dmaengine_terminate_all(info->dma_chan);
|
||||
dma_release_channel(info->dma_chan);
|
||||
}
|
||||
}
|
||||
#else
|
||||
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
|
||||
{
|
||||
info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
|
||||
if (info->data_buff == NULL)
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
|
||||
{
|
||||
kfree(info->data_buff);
|
||||
}
|
||||
#endif
|
||||
|
||||
static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
|
||||
{
|
||||
@ -1683,34 +1687,23 @@ static int alloc_nand_resource(struct platform_device *pdev)
|
||||
return ret;
|
||||
|
||||
if (use_dma) {
|
||||
/*
|
||||
* This is a dirty hack to make this driver work from
|
||||
* devicetree bindings. It can be removed once we have
|
||||
* a proper DMA controller framework for DT.
|
||||
*/
|
||||
if (pdev->dev.of_node &&
|
||||
of_machine_is_compatible("marvell,pxa3xx")) {
|
||||
info->drcmr_dat = 97;
|
||||
info->drcmr_cmd = 99;
|
||||
} else {
|
||||
r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
|
||||
if (r == NULL) {
|
||||
dev_err(&pdev->dev,
|
||||
"no resource defined for data DMA\n");
|
||||
ret = -ENXIO;
|
||||
goto fail_disable_clk;
|
||||
}
|
||||
info->drcmr_dat = r->start;
|
||||
|
||||
r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
|
||||
if (r == NULL) {
|
||||
dev_err(&pdev->dev,
|
||||
"no resource defined for cmd DMA\n");
|
||||
ret = -ENXIO;
|
||||
goto fail_disable_clk;
|
||||
}
|
||||
info->drcmr_cmd = r->start;
|
||||
r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
|
||||
if (r == NULL) {
|
||||
dev_err(&pdev->dev,
|
||||
"no resource defined for data DMA\n");
|
||||
ret = -ENXIO;
|
||||
goto fail_disable_clk;
|
||||
}
|
||||
info->drcmr_dat = r->start;
|
||||
|
||||
r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
|
||||
if (r == NULL) {
|
||||
dev_err(&pdev->dev,
|
||||
"no resource defined for cmd DMA\n");
|
||||
ret = -ENXIO;
|
||||
goto fail_disable_clk;
|
||||
}
|
||||
info->drcmr_cmd = r->start;
|
||||
}
|
||||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
@ -1821,15 +1814,16 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
|
||||
struct pxa3xx_nand_platform_data *pdata;
|
||||
struct mtd_part_parser_data ppdata = {};
|
||||
struct pxa3xx_nand_info *info;
|
||||
int ret, cs, probe_success;
|
||||
int ret, cs, probe_success, dma_available;
|
||||
|
||||
#ifndef ARCH_HAS_DMA
|
||||
if (use_dma) {
|
||||
dma_available = IS_ENABLED(CONFIG_ARM) &&
|
||||
(IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
|
||||
if (use_dma && !dma_available) {
|
||||
use_dma = 0;
|
||||
dev_warn(&pdev->dev,
|
||||
"This platform can't do DMA on this device\n");
|
||||
}
|
||||
#endif
|
||||
|
||||
ret = pxa3xx_nand_probe_dt(pdev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
Loading…
Reference in New Issue
Block a user