linux/drivers/mmc/host/mxs-mmc.c

/*
* Portions copyright (C) 2003 Russell King, PXA MMCI Driver
* Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
*
* Copyright 2008 Embedded Alley Solutions, Inc.
* Copyright 2009-2011 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/stmp_device.h>
#include <linux/spi/mxs-spi.h>
#define DRIVER_NAME "mxs-mmc"
#define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \
BM_SSP_CTRL1_RESP_ERR_IRQ | \
BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \
BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \
BM_SSP_CTRL1_DATA_CRC_IRQ | \
BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \
BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \
BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)
/* card detect polling timeout */
#define MXS_MMC_DETECT_TIMEOUT (HZ/2)
struct mxs_mmc_host {
struct mxs_ssp ssp;
struct mmc_host *mmc;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
unsigned char bus_width;
spinlock_t lock;
int sdio_irq_en;
int wp_gpio;
bool wp_inverted;
bool cd_inverted;
};
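/*
 * Read the write-protect state from the optional WP GPIO. When no WP
 * GPIO is wired up, -EINVAL is returned and the MMC core falls back to
 * treating the card as writable.
 */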
static int mxs_mmc_get_ro(struct mmc_host *mmc)
{
struct mxs_mmc_host *host = mmc_priv(mmc);
int ret;
if (!gpio_is_valid(host->wp_gpio))
return -EINVAL;
ret = gpio_get_value(host->wp_gpio);
if (host->wp_inverted)
ret = !ret;
return ret;
}
static int mxs_mmc_get_cd(struct mmc_host *mmc)
{
struct mxs_mmc_host *host = mmc_priv(mmc);
struct mxs_ssp *ssp = &host->ssp;
return !(readl(ssp->base + HW_SSP_STATUS(ssp)) &
BM_SSP_STATUS_CARD_DETECT) ^ host->cd_inverted;
}
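/*
 * Soft-reset the SSP block and reprogram it for SD/MMC operation:
 * SSP_MODE 0x3 (SD/MMC), 8-bit word length, DMA enabled, and the
 * response/data error and timeout interrupts re-armed. SDIO IRQ
 * detection is restored if it was previously enabled.
 */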
static void mxs_mmc_reset(struct mxs_mmc_host *host)
{
struct mxs_ssp *ssp = &host->ssp;
u32 ctrl0, ctrl1;
stmp_reset_block(ssp->base);
ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
BF_SSP(0x7, CTRL1_WORD_LENGTH) |
BM_SSP_CTRL1_DMA_ENABLE |
BM_SSP_CTRL1_POLARITY |
BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
BM_SSP_CTRL1_RESP_ERR_IRQ_EN;
writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
BF_SSP(2, TIMING_CLOCK_DIVIDE) |
BF_SSP(0, TIMING_CLOCK_RATE),
ssp->base + HW_SSP_TIMING(ssp));
if (host->sdio_irq_en) {
ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
}
writel(ctrl0, ssp->base + HW_SSP_CTRL0);
writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));
}
static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
struct mmc_command *cmd);
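/*
 * Completion path for a finished command/data transfer: read back the
 * response words (the four SDRESP registers are read in reverse order
 * into resp[] for 136-bit responses), unmap the data scatterlist,
 * account the transferred bytes, chain the stop command if one is
 * pending, and finally signal the MMC core.
 */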
static void mxs_mmc_request_done(struct mxs_mmc_host *host)
{
struct mmc_command *cmd = host->cmd;
struct mmc_data *data = host->data;
struct mmc_request *mrq = host->mrq;
struct mxs_ssp *ssp = &host->ssp;
if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
if (mmc_resp_type(cmd) & MMC_RSP_136) {
cmd->resp[3] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
cmd->resp[2] = readl(ssp->base + HW_SSP_SDRESP1(ssp));
cmd->resp[1] = readl(ssp->base + HW_SSP_SDRESP2(ssp));
cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP3(ssp));
} else {
cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
}
}
if (data) {
dma_unmap_sg(mmc_dev(host->mmc), data->sg,
data->sg_len, ssp->dma_dir);
/*
* If there was an error on any block, we mark all
* data blocks as being in error.
*/
if (!data->error)
data->bytes_xfered = data->blocks * data->blksz;
else
data->bytes_xfered = 0;
host->data = NULL;
if (mrq->stop) {
mxs_mmc_start_cmd(host, mrq->stop);
return;
}
}
host->mrq = NULL;
mmc_request_done(host->mmc, mrq);
}
static void mxs_mmc_dma_irq_callback(void *param)
{
struct mxs_mmc_host *host = param;
mxs_mmc_request_done(host);
}
static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
{
struct mxs_mmc_host *host = dev_id;
struct mmc_command *cmd = host->cmd;
struct mmc_data *data = host->data;
struct mxs_ssp *ssp = &host->ssp;
u32 stat;
spin_lock(&host->lock);
stat = readl(ssp->base + HW_SSP_CTRL1(ssp));
writel(stat & MXS_MMC_IRQ_BITS,
ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
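/*
 * Drop the lock before calling mmc_signal_sdio_irq(): the SDIO IRQ
 * path can re-enter mxs_mmc_enable_sdio_irq(), which takes host->lock
 * again and would otherwise deadlock.
 */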
spin_unlock(&host->lock);
if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
mmc_signal_sdio_irq(host->mmc);
if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
cmd->error = -ETIMEDOUT;
else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
cmd->error = -EIO;
if (data) {
if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
data->error = -ETIMEDOUT;
else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
data->error = -EILSEQ;
else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
data->error = -EIO;
}
return IRQ_HANDLED;
}
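/*
 * Build a DMA descriptor for the SSP channel. With host->data set, the
 * data scatterlist is mapped and transferred; otherwise the three PIO
 * words (CTRL0, CMD0, CMD1) are pushed through the descriptor. The
 * mxs-dma driver gives the generic flags a channel-specific meaning:
 * DMA_PREP_INTERRUPT appends a new DMA command structure and
 * DMA_CTRL_ACK sets its WAIT4END bit.
 */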
static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
struct mxs_mmc_host *host, unsigned long flags)
{
struct mxs_ssp *ssp = &host->ssp;
struct dma_async_tx_descriptor *desc;
struct mmc_data *data = host->data;
struct scatterlist *sgl;
unsigned int sg_len;
if (data) {
/* data */
dma_map_sg(mmc_dev(host->mmc), data->sg,
data->sg_len, ssp->dma_dir);
sgl = data->sg;
sg_len = data->sg_len;
} else {
/* pio */
sgl = (struct scatterlist *) ssp->ssp_pio_words;
sg_len = SSP_PIO_NUM;
}
desc = dmaengine_prep_slave_sg(ssp->dmach,
sgl, sg_len, ssp->slave_dirn, flags);
if (desc) {
desc->callback = mxs_mmc_dma_irq_callback;
desc->callback_param = host;
} else {
if (data)
dma_unmap_sg(mmc_dev(host->mmc), data->sg,
data->sg_len, ssp->dma_dir);
}
return desc;
}
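/* Broadcast command (MMC_CMD_BC): no response and no data phase. */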
static void mxs_mmc_bc(struct mxs_mmc_host *host)
{
struct mxs_ssp *ssp = &host->ssp;
struct mmc_command *cmd = host->cmd;
struct dma_async_tx_descriptor *desc;
u32 ctrl0, cmd0, cmd1;
ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
cmd1 = cmd->arg;
if (host->sdio_irq_en) {
ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
}
ssp->ssp_pio_words[0] = ctrl0;
ssp->ssp_pio_words[1] = cmd0;
ssp->ssp_pio_words[2] = cmd1;
ssp->dma_dir = DMA_NONE;
ssp->slave_dirn = DMA_TRANS_NONE;
desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
if (!desc)
goto out;
dmaengine_submit(desc);
dma_async_issue_pending(ssp->dmach);
return;
out:
dev_warn(mmc_dev(host->mmc),
"%s: failed to prep dma\n", __func__);
}
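/*
 * Addressed command (MMC_CMD_AC) or broadcast command with response
 * (MMC_CMD_BCR): a response may be expected but there is no data phase.
 */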
static void mxs_mmc_ac(struct mxs_mmc_host *host)
{
struct mxs_ssp *ssp = &host->ssp;
struct mmc_command *cmd = host->cmd;
struct dma_async_tx_descriptor *desc;
u32 ignore_crc, get_resp, long_resp;
u32 ctrl0, cmd0, cmd1;
ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
0 : BM_SSP_CTRL0_IGNORE_CRC;
get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
BM_SSP_CTRL0_GET_RESP : 0;
long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
BM_SSP_CTRL0_LONG_RESP : 0;
ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
cmd1 = cmd->arg;
if (host->sdio_irq_en) {
ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
}
ssp->ssp_pio_words[0] = ctrl0;
ssp->ssp_pio_words[1] = cmd0;
ssp->ssp_pio_words[2] = cmd1;
ssp->dma_dir = DMA_NONE;
ssp->slave_dirn = DMA_TRANS_NONE;
desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
if (!desc)
goto out;
dmaengine_submit(desc);
dma_async_issue_pending(ssp->dmach);
return;
out:
dev_warn(mmc_dev(host->mmc),
"%s: failed to prep dma\n", __func__);
}
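/*
 * Convert the core-supplied data timeout into the SSP TIMING_TIMEOUT
 * field. The intermediate math is done on scaled-down values so the
 * 32-bit multiplication does not overflow; ssp_timeout_mul reflects
 * the granularity of the hardware timeout counter (presumably one
 * count per 4096 SSP clocks). Worked example with hypothetical values,
 * traced exactly as the code computes them: clock_rate = 96000000 and
 * ns = 10000000 give clock_per_ms = 96000, ms = 10000,
 * ticks = 960000000 and ssp_ticks = 234375.
 */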
static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
{
const unsigned int ssp_timeout_mul = 4096;
/*
* Calculate ticks in ms since ns are large numbers
* and might overflow
*/
const unsigned int clock_per_ms = clock_rate / 1000;
const unsigned int ms = ns / 1000;
const unsigned int ticks = ms * clock_per_ms;
const unsigned int ssp_ticks = ticks / ssp_timeout_mul;
WARN_ON(ssp_ticks == 0);
return ssp_ticks;
}
static void mxs_mmc_adtc(struct mxs_mmc_host *host)
{
struct mmc_command *cmd = host->cmd;
struct mmc_data *data = cmd->data;
struct dma_async_tx_descriptor *desc;
struct scatterlist *sgl = data->sg, *sg;
unsigned int sg_len = data->sg_len;
unsigned int i;
unsigned short dma_data_dir, timeout;
enum dma_transfer_direction slave_dirn;
unsigned int data_size = 0, log2_blksz;
unsigned int blocks = data->blocks;
struct mxs_ssp *ssp = &host->ssp;
u32 ignore_crc, get_resp, long_resp, read;
u32 ctrl0, cmd0, cmd1, val;
ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
0 : BM_SSP_CTRL0_IGNORE_CRC;
get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
BM_SSP_CTRL0_GET_RESP : 0;
long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
BM_SSP_CTRL0_LONG_RESP : 0;
if (data->flags & MMC_DATA_WRITE) {
dma_data_dir = DMA_TO_DEVICE;
slave_dirn = DMA_MEM_TO_DEV;
read = 0;
} else {
dma_data_dir = DMA_FROM_DEVICE;
slave_dirn = DMA_DEV_TO_MEM;
read = BM_SSP_CTRL0_READ;
}
ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
ignore_crc | get_resp | long_resp |
BM_SSP_CTRL0_DATA_XFER | read |
BM_SSP_CTRL0_WAIT_FOR_IRQ |
BM_SSP_CTRL0_ENABLE;
cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
/* get logarithm to base 2 of block size for setting register */
log2_blksz = ilog2(data->blksz);
/*
* take special care of the case that data size from data->sg
* is not equal to blocks x blksz
*/
for_each_sg(sgl, sg, sg_len, i)
data_size += sg->length;
if (data_size != data->blocks * data->blksz)
blocks = 1;
/* xfer count, block size and count need to be set differently */
if (ssp_is_old(ssp)) {
ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
} else {
writel(data_size, ssp->base + HW_SSP_XFER_SIZE);
writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
ssp->base + HW_SSP_BLOCK_SIZE);
}
if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
(cmd->opcode == SD_IO_RW_EXTENDED))
cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
cmd1 = cmd->arg;
if (host->sdio_irq_en) {
ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
}
/* set the timeout count */
timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns);
val = readl(ssp->base + HW_SSP_TIMING(ssp));
val &= ~(BM_SSP_TIMING_TIMEOUT);
val |= BF_SSP(timeout, TIMING_TIMEOUT);
writel(val, ssp->base + HW_SSP_TIMING(ssp));
/* pio */
ssp->ssp_pio_words[0] = ctrl0;
ssp->ssp_pio_words[1] = cmd0;
ssp->ssp_pio_words[2] = cmd1;
ssp->dma_dir = DMA_NONE;
ssp->slave_dirn = DMA_TRANS_NONE;
desc = mxs_mmc_prep_dma(host, 0);
if (!desc)
goto out;
/* append data sg */
WARN_ON(host->data != NULL);
host->data = data;
ssp->dma_dir = dma_data_dir;
ssp->slave_dirn = slave_dirn;
desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
goto out;
dmaengine_submit(desc);
dma_async_issue_pending(ssp->dmach);
return;
out:
dev_warn(mmc_dev(host->mmc),
"%s: failed to prep dma\n", __func__);
}
static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
struct mmc_command *cmd)
{
host->cmd = cmd;
switch (mmc_cmd_type(cmd)) {
case MMC_CMD_BC:
mxs_mmc_bc(host);
break;
case MMC_CMD_BCR:
mxs_mmc_ac(host);
break;
case MMC_CMD_AC:
mxs_mmc_ac(host);
break;
case MMC_CMD_ADTC:
mxs_mmc_adtc(host);
break;
default:
dev_warn(mmc_dev(host->mmc),
"%s: unknown MMC command\n", __func__);
break;
}
}
static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mxs_mmc_host *host = mmc_priv(mmc);
WARN_ON(host->mrq != NULL);
host->mrq = mrq;
mxs_mmc_start_cmd(host, mrq->cmd);
}
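/*
 * host->bus_width holds the CTRL0_BUS_WIDTH encoding used by
 * mxs_mmc_adtc(): 0 = 1-bit, 1 = 4-bit, 2 = 8-bit.
 */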
static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mxs_mmc_host *host = mmc_priv(mmc);
if (ios->bus_width == MMC_BUS_WIDTH_8)
host->bus_width = 2;
else if (ios->bus_width == MMC_BUS_WIDTH_4)
host->bus_width = 1;
else
host->bus_width = 0;
if (ios->clock)
mxs_ssp_set_clk_rate(&host->ssp, ios->clock);
}
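/*
 * Toggle SDIO interrupt detection. The STMP SET/CLR register aliases
 * are used so the CTRL0/CTRL1 bits can be flipped without a
 * read-modify-write sequence.
 */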
static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct mxs_mmc_host *host = mmc_priv(mmc);
struct mxs_ssp *ssp = &host->ssp;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
host->sdio_irq_en = enable;
if (enable) {
writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET);
} else {
writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
}
spin_unlock_irqrestore(&host->lock, flags);
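/*
 * Signal a pending SDIO interrupt only after releasing host->lock;
 * mmc_signal_sdio_irq() can loop back into this function via the SDIO
 * IRQ thread and would recursively take the lock.
 */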
if (enable && readl(ssp->base + HW_SSP_STATUS(ssp)) &
BM_SSP_STATUS_SDIO_IRQ)
mmc_signal_sdio_irq(host->mmc);
}
static const struct mmc_host_ops mxs_mmc_ops = {
.request = mxs_mmc_request,
.get_ro = mxs_mmc_get_ro,
.get_cd = mxs_mmc_get_cd,
.set_ios = mxs_mmc_set_ios,
.enable_sdio_irq = mxs_mmc_enable_sdio_irq,
};
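/*
 * DMA channel filter: accept only the APBH DMA channel whose id matches
 * this SSP instance and hand it the mxs-specific dma_data (the DMA IRQ
 * number filled in at probe time).
 */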
static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
{
struct mxs_mmc_host *host = param;
struct mxs_ssp *ssp = &host->ssp;
if (!mxs_dma_is_apbh(chan))
return false;
if (chan->chan_id != ssp->dma_channel)
return false;
chan->private = &ssp->dma_data;
return true;
}
static struct platform_device_id mxs_ssp_ids[] = {
{
.name = "imx23-mmc",
.driver_data = IMX23_SSP,
}, {
.name = "imx28-mmc",
.driver_data = IMX28_SSP,
}, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(platform, mxs_ssp_ids);
static const struct of_device_id mxs_mmc_dt_ids[] = {
{ .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_SSP, },
{ .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_SSP, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
static int mxs_mmc_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(mxs_mmc_dt_ids, &pdev->dev);
struct device_node *np = pdev->dev.of_node;
struct mxs_mmc_host *host;
struct mmc_host *mmc;
struct resource *iores, *dmares;
struct pinctrl *pinctrl;
int ret = 0, irq_err, irq_dma;
dma_cap_mask_t mask;
struct regulator *reg_vmmc;
enum of_gpio_flags flags;
struct mxs_ssp *ssp;
u32 bus_width = 0;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
irq_err = platform_get_irq(pdev, 0);
irq_dma = platform_get_irq(pdev, 1);
if (!iores || irq_err < 0 || irq_dma < 0)
return -EINVAL;
mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
if (!mmc)
return -ENOMEM;
host = mmc_priv(mmc);
ssp = &host->ssp;
ssp->dev = &pdev->dev;
ssp->base = devm_ioremap_resource(&pdev->dev, iores);
if (IS_ERR(ssp->base)) {
ret = PTR_ERR(ssp->base);
goto out_mmc_free;
}
if (np) {
ssp->devid = (enum mxs_ssp_id) of_id->data;
/*
* TODO: This is a temporary solution and should be changed
* to use generic DMA binding later when the helpers get in.
*/
ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
&ssp->dma_channel);
if (ret) {
dev_err(mmc_dev(host->mmc),
"failed to get dma channel\n");
goto out_mmc_free;
}
} else {
ssp->devid = pdev->id_entry->driver_data;
ssp->dma_channel = dmares->start;
}
host->mmc = mmc;
host->sdio_irq_en = 0;
reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
if (!IS_ERR(reg_vmmc)) {
ret = regulator_enable(reg_vmmc);
if (ret) {
dev_err(&pdev->dev,
"Failed to enable vmmc regulator: %d\n", ret);
goto out_mmc_free;
}
}
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl)) {
ret = PTR_ERR(pinctrl);
goto out_mmc_free;
}
ssp->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(ssp->clk)) {
ret = PTR_ERR(ssp->clk);
goto out_mmc_free;
}
clk_prepare_enable(ssp->clk);
mxs_mmc_reset(host);
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
ssp->dma_data.chan_irq = irq_dma;
ssp->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host);
if (!ssp->dmach) {
dev_err(mmc_dev(host->mmc),
"%s: failed to request dma\n", __func__);
goto out_clk_put;
}
/* set mmc core parameters */
mmc->ops = &mxs_mmc_ops;
mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
of_property_read_u32(np, "bus-width", &bus_width);
if (bus_width == 4)
mmc->caps |= MMC_CAP_4_BIT_DATA;
else if (bus_width == 8)
mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
host->wp_gpio = of_get_named_gpio_flags(np, "wp-gpios", 0, &flags);
if (flags & OF_GPIO_ACTIVE_LOW)
host->wp_inverted = 1;
host->cd_inverted = of_property_read_bool(np, "cd-inverted");
mmc->f_min = 400000;
mmc->f_max = 288000000;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
mmc->max_segs = 52;
mmc->max_blk_size = 1 << 0xf;
mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff;
mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff;
mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev);
platform_set_drvdata(pdev, mmc);
ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
DRIVER_NAME, host);
if (ret)
goto out_free_dma;
spin_lock_init(&host->lock);
ret = mmc_add_host(mmc);
if (ret)
goto out_free_dma;
dev_info(mmc_dev(host->mmc), "initialized\n");
return 0;
out_free_dma:
if (ssp->dmach)
dma_release_channel(ssp->dmach);
out_clk_put:
clk_disable_unprepare(ssp->clk);
clk_put(ssp->clk);
out_mmc_free:
mmc_free_host(mmc);
return ret;
}
static int mxs_mmc_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct mxs_mmc_host *host = mmc_priv(mmc);
struct mxs_ssp *ssp = &host->ssp;
mmc_remove_host(mmc);
platform_set_drvdata(pdev, NULL);
if (ssp->dmach)
dma_release_channel(ssp->dmach);
clk_disable_unprepare(ssp->clk);
clk_put(ssp->clk);
mmc_free_host(mmc);
return 0;
}
#ifdef CONFIG_PM
static int mxs_mmc_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct mxs_mmc_host *host = mmc_priv(mmc);
struct mxs_ssp *ssp = &host->ssp;
int ret = 0;
ret = mmc_suspend_host(mmc);
clk_disable_unprepare(ssp->clk);
return ret;
}
static int mxs_mmc_resume(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct mxs_mmc_host *host = mmc_priv(mmc);
struct mxs_ssp *ssp = &host->ssp;
int ret = 0;
clk_prepare_enable(ssp->clk);
ret = mmc_resume_host(mmc);
return ret;
}
static const struct dev_pm_ops mxs_mmc_pm_ops = {
.suspend = mxs_mmc_suspend,
.resume = mxs_mmc_resume,
};
#endif
static struct platform_driver mxs_mmc_driver = {
.probe = mxs_mmc_probe,
.remove = mxs_mmc_remove,
.id_table = mxs_ssp_ids,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &mxs_mmc_pm_ops,
#endif
.of_match_table = mxs_mmc_dt_ids,
},
};
module_platform_driver(mxs_mmc_driver);
MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
MODULE_AUTHOR("Freescale Semiconductor");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);