linux/drivers/mmc/host/dw_mmc.c

/*
* Synopsys DesignWare Multimedia Card Interface driver
* (Based on NXP driver for lpc 31xx)
*
* Copyright (C) 2009 NXP Semiconductors
* Copyright (C) 2009, 2010 Imagination Technologies Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>
#include "dw_mmc.h"
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
SDMMC_INT_HTO | SDMMC_INT_SBE | \
SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS 1
#define DW_MCI_RECV_STATUS 2
#define DW_MCI_DMA_THRESHOLD 16
#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
#define DW_MCI_FREQ_MIN 100000 /* unit: HZ */
#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
SDMMC_IDMAC_INT_TI)
#define DESC_RING_BUF_SZ PAGE_SIZE
struct idmac_desc_64addr {
u32 des0; /* Control Descriptor */
#define IDMAC_OWN_CLR64(x) \
!((x) & cpu_to_le32(IDMAC_DES0_OWN))
u32 des1; /* Reserved */
u32 des2; /* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))
u32 des3; /* Reserved */
u32 des4; /* Lower 32-bits of Buffer Address Pointer 1*/
u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/
u32 des6; /* Lower 32-bits of Next Descriptor Address */
u32 des7; /* Upper 32-bits of Next Descriptor Address */
};
struct idmac_desc {
__le32 des0; /* Control Descriptor */
#define IDMAC_DES0_DIC BIT(1)
#define IDMAC_DES0_LD BIT(2)
#define IDMAC_DES0_FD BIT(3)
#define IDMAC_DES0_CH BIT(4)
#define IDMAC_DES0_ER BIT(5)
#define IDMAC_DES0_CES BIT(30)
#define IDMAC_DES0_OWN BIT(31)
__le32 des1; /* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
__le32 des2; /* buffer 1 physical address */
__le32 des3; /* buffer 2 physical address */
};
/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH 0x1000
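/*
 * Illustrative example (hypothetical sizes, not from the databook): a
 * 9 KiB scatterlist segment is split by dw_mci_prepare_desc32/64() into
 * DIV_ROUND_UP(9216, 4096) = 3 chained descriptors carrying 4096, 4096
 * and 1024 bytes respectively.
 */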
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
struct dw_mci_slot *slot = s->private;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_command *stop;
struct mmc_data *data;
/* Make sure we get a consistent snapshot */
spin_lock_bh(&slot->host->lock);
mrq = slot->mrq;
if (mrq) {
cmd = mrq->cmd;
data = mrq->data;
stop = mrq->stop;
if (cmd)
seq_printf(s,
"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
cmd->opcode, cmd->arg, cmd->flags,
cmd->resp[0], cmd->resp[1], cmd->resp[2],
cmd->resp[3], cmd->error);
if (data)
seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
data->bytes_xfered, data->blocks,
data->blksz, data->flags, data->error);
if (stop)
seq_printf(s,
"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
stop->opcode, stop->arg, stop->flags,
stop->resp[0], stop->resp[1], stop->resp[2],
stop->resp[3], stop->error);
}
spin_unlock_bh(&slot->host->lock);
return 0;
}
static int dw_mci_req_open(struct inode *inode, struct file *file)
{
return single_open(file, dw_mci_req_show, inode->i_private);
}
static const struct file_operations dw_mci_req_fops = {
.owner = THIS_MODULE,
.open = dw_mci_req_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int dw_mci_regs_show(struct seq_file *s, void *v)
{
struct dw_mci *host = s->private;
seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
return 0;
}
static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
return single_open(file, dw_mci_regs_show, inode->i_private);
}
static const struct file_operations dw_mci_regs_fops = {
.owner = THIS_MODULE,
.open = dw_mci_regs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
struct mmc_host *mmc = slot->mmc;
struct dw_mci *host = slot->host;
struct dentry *root;
struct dentry *node;
root = mmc->debugfs_root;
if (!root)
return;
node = debugfs_create_file("regs", S_IRUSR, root, host,
&dw_mci_regs_fops);
if (!node)
goto err;
node = debugfs_create_file("req", S_IRUSR, root, slot,
&dw_mci_req_fops);
if (!node)
goto err;
node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
if (!node)
goto err;
node = debugfs_create_x32("pending_events", S_IRUSR, root,
(u32 *)&host->pending_events);
if (!node)
goto err;
node = debugfs_create_x32("completed_events", S_IRUSR, root,
(u32 *)&host->completed_events);
if (!node)
goto err;
return;
err:
dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
u32 ctrl;
ctrl = mci_readl(host, CTRL);
ctrl |= reset;
mci_writel(host, CTRL, ctrl);
/* wait till resets clear */
if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
!(ctrl & reset),
1, 500 * USEC_PER_MSEC)) {
dev_err(host->dev,
"Timeout resetting block (ctrl reset %#x)\n",
ctrl & reset);
return false;
}
return true;
}
static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
u32 status;
/*
* Databook says that before issuing a new data transfer command
* we need to check to see if the card is busy. Data transfer commands
* all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
*
* ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
* expected.
*/
if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
!(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
status,
!(status & SDMMC_STATUS_BUSY),
10, 500 * USEC_PER_MSEC))
dev_err(host->dev, "Busy; trying anyway\n");
}
}
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
struct dw_mci *host = slot->host;
unsigned int cmd_status = 0;
mci_writel(host, CMDARG, arg);
wmb(); /* drain writebuffer */
dw_mci_wait_while_busy(host, cmd);
mci_writel(host, CMD, SDMMC_CMD_START | cmd);
if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
!(cmd_status & SDMMC_CMD_START),
1, 500 * USEC_PER_MSEC))
dev_err(&slot->mmc->class_dev,
"Timeout sending command (cmd %#x arg %#x status %#x)\n",
cmd, arg, cmd_status);
}
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
u32 cmdr;
cmd->error = -EINPROGRESS;
cmdr = cmd->opcode;
if (cmd->opcode == MMC_STOP_TRANSMISSION ||
cmd->opcode == MMC_GO_IDLE_STATE ||
cmd->opcode == MMC_GO_INACTIVE_STATE ||
(cmd->opcode == SD_IO_RW_DIRECT &&
((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
cmdr |= SDMMC_CMD_STOP;
else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
if (cmd->opcode == SD_SWITCH_VOLTAGE) {
u32 clk_en_a;
/* Special bit makes CMD11 not die */
cmdr |= SDMMC_CMD_VOLT_SWITCH;
/* Change state to continue to handle CMD11 weirdness */
WARN_ON(slot->host->state != STATE_SENDING_CMD);
slot->host->state = STATE_SENDING_CMD11;
/*
* We need to disable low power mode (automatic clock stop)
* while doing voltage switch so we don't confuse the card,
* since stopping the clock is a specific part of the UHS
* voltage change dance.
*
* Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
* unconditionally turned back on in dw_mci_setup_bus() if it's
* ever called with a non-zero clock. That shouldn't happen
* until the voltage change is all done.
*/
clk_en_a = mci_readl(host, CLKENA);
clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
mci_writel(host, CLKENA, clk_en_a);
mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
SDMMC_CMD_PRV_DAT_WAIT, 0);
}
if (cmd->flags & MMC_RSP_PRESENT) {
/* We expect a response, so set this bit */
cmdr |= SDMMC_CMD_RESP_EXP;
if (cmd->flags & MMC_RSP_136)
cmdr |= SDMMC_CMD_RESP_LONG;
}
if (cmd->flags & MMC_RSP_CRC)
cmdr |= SDMMC_CMD_RESP_CRC;
if (cmd->data) {
cmdr |= SDMMC_CMD_DAT_EXP;
if (cmd->data->flags & MMC_DATA_WRITE)
cmdr |= SDMMC_CMD_DAT_WR;
}
if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
cmdr |= SDMMC_CMD_USE_HOLD_REG;
return cmdr;
}
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
struct mmc_command *stop;
u32 cmdr;
if (!cmd->data)
return 0;
stop = &host->stop_abort;
cmdr = cmd->opcode;
memset(stop, 0, sizeof(struct mmc_command));
if (cmdr == MMC_READ_SINGLE_BLOCK ||
cmdr == MMC_READ_MULTIPLE_BLOCK ||
cmdr == MMC_WRITE_BLOCK ||
cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
cmdr == MMC_SEND_TUNING_BLOCK ||
cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
stop->opcode = MMC_STOP_TRANSMISSION;
stop->arg = 0;
stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
} else if (cmdr == SD_IO_RW_EXTENDED) {
stop->opcode = SD_IO_RW_DIRECT;
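/*
 * CMD52 argument layout: bit 31 = write, bits 30:28 = function 0
 * (the CCCR), bits 25:9 = register address (SDIO_CCCR_ABORT) and
 * bits 7:0 = data byte, here the number of the function being
 * aborted, taken from bits 30:28 of the original CMD53 argument.
 */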
stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
((cmd->arg >> 28) & 0x7);
stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
} else {
return 0;
}
cmdr = stop->opcode | SDMMC_CMD_STOP |
SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags))
cmdr |= SDMMC_CMD_USE_HOLD_REG;
return cmdr;
}
static void dw_mci_start_command(struct dw_mci *host,
struct mmc_command *cmd, u32 cmd_flags)
{
host->cmd = cmd;
dev_vdbg(host->dev,
"start command: ARGR=0x%08x CMDR=0x%08x\n",
cmd->arg, cmd_flags);
mci_writel(host, CMDARG, cmd->arg);
wmb(); /* drain writebuffer */
dw_mci_wait_while_busy(host, cmd_flags);
mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
struct mmc_command *stop = &host->stop_abort;
dw_mci_start_command(host, stop, host->stop_cmdr);
}
/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
if (host->using_dma) {
host->dma_ops->stop(host);
host->dma_ops->cleanup(host);
}
/* Data transfer was stopped by the interrupt handler */
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
struct mmc_data *data = host->data;
if (data && data->host_cookie == COOKIE_MAPPED) {
dma_unmap_sg(host->dev,
data->sg,
data->sg_len,
mmc_get_dma_dir(data));
data->host_cookie = COOKIE_UNMAPPED;
}
}
static void dw_mci_idmac_reset(struct dw_mci *host)
{
u32 bmod = mci_readl(host, BMOD);
/* Software reset of DMA */
bmod |= SDMMC_IDMAC_SWRESET;
mci_writel(host, BMOD, bmod);
}
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
u32 temp;
/* Disable and reset the IDMAC interface */
temp = mci_readl(host, CTRL);
temp &= ~SDMMC_CTRL_USE_IDMAC;
temp |= SDMMC_CTRL_DMA_RESET;
mci_writel(host, CTRL, temp);
/* Stop the IDMAC running */
temp = mci_readl(host, BMOD);
temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
temp |= SDMMC_IDMAC_SWRESET;
mci_writel(host, BMOD, temp);
}
static void dw_mci_dmac_complete_dma(void *arg)
{
struct dw_mci *host = arg;
struct mmc_data *data = host->data;
dev_vdbg(host->dev, "DMA complete\n");
if ((host->use_dma == TRANS_MODE_EDMAC) &&
data && (data->flags & MMC_DATA_READ))
/* Invalidate cache after read */
dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
data->sg,
data->sg_len,
DMA_FROM_DEVICE);
host->dma_ops->cleanup(host);
/*
* If the card was removed, data will be NULL. No point in trying to
* send the stop command or waiting for NBUSY in this case.
*/
if (data) {
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
}
static int dw_mci_idmac_init(struct dw_mci *host)
{
int i;
if (host->dma_64bit_address == 1) {
struct idmac_desc_64addr *p;
/* Number of descriptors in the ring buffer */
host->ring_size =
DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);
/* Forward link the descriptor list */
for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
i++, p++) {
p->des6 = (host->sg_dma +
(sizeof(struct idmac_desc_64addr) *
(i + 1))) & 0xffffffff;
p->des7 = (u64)(host->sg_dma +
(sizeof(struct idmac_desc_64addr) *
(i + 1))) >> 32;
/* Initialize reserved and buffer size fields to "0" */
p->des1 = 0;
p->des2 = 0;
p->des3 = 0;
}
/* Set the last descriptor as the end-of-ring descriptor */
p->des6 = host->sg_dma & 0xffffffff;
p->des7 = (u64)host->sg_dma >> 32;
p->des0 = IDMAC_DES0_ER;
} else {
struct idmac_desc *p;
/* Number of descriptors in the ring buffer */
host->ring_size =
DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
/* Forward link the descriptor list */
for (i = 0, p = host->sg_cpu;
i < host->ring_size - 1;
i++, p++) {
p->des3 = cpu_to_le32(host->sg_dma +
(sizeof(struct idmac_desc) * (i + 1)));
p->des1 = 0;
}
/* Set the last descriptor as the end-of-ring descriptor */
p->des3 = cpu_to_le32(host->sg_dma);
p->des0 = cpu_to_le32(IDMAC_DES0_ER);
}
dw_mci_idmac_reset(host);
if (host->dma_64bit_address == 1) {
/* Mask out interrupts - get Tx & Rx complete only */
mci_writel(host, IDSTS64, IDMAC_INT_CLR);
mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
/* Set the descriptor base address */
mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
} else {
/* Mask out interrupts - get Tx & Rx complete only */
mci_writel(host, IDSTS, IDMAC_INT_CLR);
mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
/* Set the descriptor base address */
mci_writel(host, DBADDR, host->sg_dma);
}
return 0;
}
static inline int dw_mci_prepare_desc64(struct dw_mci *host,
struct mmc_data *data,
unsigned int sg_len)
{
unsigned int desc_len;
struct idmac_desc_64addr *desc_first, *desc_last, *desc;
u32 val;
int i;
desc_first = desc_last = desc = host->sg_cpu;
for (i = 0; i < sg_len; i++) {
unsigned int length = sg_dma_len(&data->sg[i]);
u64 mem_addr = sg_dma_address(&data->sg[i]);
for ( ; length ; desc++) {
desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
length : DW_MCI_DESC_DATA_LENGTH;
length -= desc_len;
/*
* Wait for the previous OWN-bit clear operation of the
* IDMAC to finish, to make sure this descriptor is no
* longer owned by the IDMAC, since the IDMAC's writes
* and the CPU's reads are asynchronous.
*/
if (readl_poll_timeout_atomic(&desc->des0, val,
!(val & IDMAC_DES0_OWN),
10, 100 * USEC_PER_MSEC))
goto err_own_bit;
/*
* Set the OWN bit and disable interrupts
* for this descriptor
*/
desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
IDMAC_DES0_CH;
/* Buffer length */
IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
/* Physical address to DMA to/from */
desc->des4 = mem_addr & 0xffffffff;
desc->des5 = mem_addr >> 32;
/* Update physical address for the next desc */
mem_addr += desc_len;
/* Save pointer to the last descriptor */
desc_last = desc;
}
}
/* Set first descriptor */
desc_first->des0 |= IDMAC_DES0_FD;
/* Set last descriptor */
desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
desc_last->des0 |= IDMAC_DES0_LD;
return 0;
err_own_bit:
/* restore the descriptor chain as it's polluted */
dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
dw_mci_idmac_init(host);
return -EINVAL;
}
static inline int dw_mci_prepare_desc32(struct dw_mci *host,
struct mmc_data *data,
unsigned int sg_len)
{
unsigned int desc_len;
struct idmac_desc *desc_first, *desc_last, *desc;
u32 val;
int i;
desc_first = desc_last = desc = host->sg_cpu;
for (i = 0; i < sg_len; i++) {
unsigned int length = sg_dma_len(&data->sg[i]);
u32 mem_addr = sg_dma_address(&data->sg[i]);
for ( ; length ; desc++) {
desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
length : DW_MCI_DESC_DATA_LENGTH;
length -= desc_len;
/*
* Wait for the previous OWN-bit clear operation of the
* IDMAC to finish, to make sure this descriptor is no
* longer owned by the IDMAC, since the IDMAC's writes
* and the CPU's reads are asynchronous.
*/
if (readl_poll_timeout_atomic(&desc->des0, val,
IDMAC_OWN_CLR64(val),
10,
100 * USEC_PER_MSEC))
goto err_own_bit;
/*
* Set the OWN bit and disable interrupts
* for this descriptor
*/
desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
IDMAC_DES0_DIC |
IDMAC_DES0_CH);
/* Buffer length */
IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
/* Physical address to DMA to/from */
desc->des2 = cpu_to_le32(mem_addr);
/* Update physical address for the next desc */
mem_addr += desc_len;
/* Save pointer to the last descriptor */
desc_last = desc;
}
}
/* Set first descriptor */
desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
/* Set last descriptor */
desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
IDMAC_DES0_DIC));
desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
return 0;
err_own_bit:
/* restore the descriptor chain as it's polluted */
dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
dw_mci_idmac_init(host);
return -EINVAL;
}
static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
u32 temp;
int ret;
if (host->dma_64bit_address == 1)
ret = dw_mci_prepare_desc64(host, host->data, sg_len);
else
ret = dw_mci_prepare_desc32(host, host->data, sg_len);
if (ret)
goto out;
/* drain writebuffer */
wmb();
/* Make sure to reset DMA in case we did PIO before this */
dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
dw_mci_idmac_reset(host);
/* Select IDMAC interface */
temp = mci_readl(host, CTRL);
temp |= SDMMC_CTRL_USE_IDMAC;
mci_writel(host, CTRL, temp);
/* drain writebuffer */
wmb();
/* Enable the IDMAC */
temp = mci_readl(host, BMOD);
temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
mci_writel(host, BMOD, temp);
/* Start it running */
mci_writel(host, PLDMND, 1);
out:
return ret;
}
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
.init = dw_mci_idmac_init,
.start = dw_mci_idmac_start_dma,
.stop = dw_mci_idmac_stop_dma,
.complete = dw_mci_dmac_complete_dma,
.cleanup = dw_mci_dma_cleanup,
};
static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
dmaengine_terminate_async(host->dms->ch);
}
static int dw_mci_edmac_start_dma(struct dw_mci *host,
unsigned int sg_len)
{
struct dma_slave_config cfg;
struct dma_async_tx_descriptor *desc = NULL;
struct scatterlist *sgl = host->data->sg;
const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
u32 sg_elems = host->data->sg_len;
u32 fifoth_val;
u32 fifo_offset = host->fifo_reg - host->regs;
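/*
 * fifo_offset is the data FIFO's offset within the register block; it is
 * added to the physical base (phy_regs) below because the DMA engine
 * needs a bus address, not the ioremapped host->regs mapping.
 */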
int ret = 0;
/* Set external dma config: burst size, burst width */
cfg.dst_addr = host->phy_regs + fifo_offset;
cfg.src_addr = cfg.dst_addr;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
/* Match burst msize with external dma config */
fifoth_val = mci_readl(host, FIFOTH);
cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
cfg.src_maxburst = cfg.dst_maxburst;
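/*
 * FIFOTH[30:28] is the 3-bit DW_DMA multiple-transaction size (MSIZE)
 * encoding; mszs[] translates it to a transfer count, e.g. an encoding
 * of 3 means bursts of 16 FIFO-width words.
 */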
if (host->data->flags & MMC_DATA_WRITE)
cfg.direction = DMA_MEM_TO_DEV;
else
cfg.direction = DMA_DEV_TO_MEM;
ret = dmaengine_slave_config(host->dms->ch, &cfg);
if (ret) {
dev_err(host->dev, "Failed to config edmac.\n");
return -EBUSY;
}
desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
sg_len, cfg.direction,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dev_err(host->dev, "Can't prepare slave sg.\n");
return -EBUSY;
}
/* Set dw_mci_dmac_complete_dma as callback */
desc->callback = dw_mci_dmac_complete_dma;
desc->callback_param = (void *)host;
dmaengine_submit(desc);
/* Flush cache before write */
if (host->data->flags & MMC_DATA_WRITE)
dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
sg_elems, DMA_TO_DEVICE);
dma_async_issue_pending(host->dms->ch);
return 0;
}
static int dw_mci_edmac_init(struct dw_mci *host)
{
/* Request external dma channel */
host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
if (!host->dms)
return -ENOMEM;
host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
if (!host->dms->ch) {
dev_err(host->dev, "Failed to get external DMA channel.\n");
kfree(host->dms);
host->dms = NULL;
return -ENXIO;
}
return 0;
}
static void dw_mci_edmac_exit(struct dw_mci *host)
{
if (host->dms) {
if (host->dms->ch) {
dma_release_channel(host->dms->ch);
host->dms->ch = NULL;
}
kfree(host->dms);
host->dms = NULL;
}
}
static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
.init = dw_mci_edmac_init,
.exit = dw_mci_edmac_exit,
.start = dw_mci_edmac_start_dma,
.stop = dw_mci_edmac_stop_dma,
.complete = dw_mci_dmac_complete_dma,
.cleanup = dw_mci_dma_cleanup,
};
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
struct mmc_data *data,
int cookie)
{
struct scatterlist *sg;
unsigned int i, sg_len;
if (data->host_cookie == COOKIE_PRE_MAPPED)
return data->sg_len;
/*
* We don't do DMA on "complex" transfers, i.e. with
* non-word-aligned buffers or lengths. Also, we don't bother
* with all the DMA setup overhead for short transfers.
*/
if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
return -EINVAL;
if (data->blksz & 3)
return -EINVAL;
for_each_sg(data->sg, sg, data->sg_len, i) {
if (sg->offset & 3 || sg->length & 3)
return -EINVAL;
}
sg_len = dma_map_sg(host->dev,
data->sg,
data->sg_len,
mmc_get_dma_dir(data));
if (sg_len == 0)
return -EINVAL;
data->host_cookie = cookie;
return sg_len;
}
static void dw_mci_pre_req(struct mmc_host *mmc,
struct mmc_request *mrq)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
if (!slot->host->use_dma || !data)
return;
/* This data might be unmapped at this time */
data->host_cookie = COOKIE_UNMAPPED;
if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
COOKIE_PRE_MAPPED) < 0)
data->host_cookie = COOKIE_UNMAPPED;
}
static void dw_mci_post_req(struct mmc_host *mmc,
struct mmc_request *mrq,
int err)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
if (!slot->host->use_dma || !data)
return;
if (data->host_cookie != COOKIE_UNMAPPED)
dma_unmap_sg(slot->host->dev,
data->sg,
data->sg_len,
mmc_get_dma_dir(data));
data->host_cookie = COOKIE_UNMAPPED;
}
static int dw_mci_get_cd(struct mmc_host *mmc)
{
int present;
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
int gpio_cd = mmc_gpio_get_cd(mmc);
/* Use platform get_cd function, else try onboard card detect */
if (((mmc->caps & MMC_CAP_NEEDS_POLL)
|| !mmc_card_is_removable(mmc))) {
present = 1;
if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
if (mmc->caps & MMC_CAP_NEEDS_POLL) {
dev_info(&mmc->class_dev,
"card is polling.\n");
} else {
dev_info(&mmc->class_dev,
"card is non-removable.\n");
}
set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
}
return present;
} else if (gpio_cd >= 0)
present = gpio_cd;
else
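/* CDETECT (card_detect_n) is active low: 0 means a card is present */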
present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
== 0 ? 1 : 0;
spin_lock_bh(&host->lock);
if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
dev_dbg(&mmc->class_dev, "card is present\n");
else if (!present &&
!test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
dev_dbg(&mmc->class_dev, "card is not present\n");
spin_unlock_bh(&host->lock);
return present;
}
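/*
 * Worked example for dw_mci_adjust_fifoth() below (hypothetical values):
 * with fifo_depth = 32, a 32-bit FIFO (fifo_width = 4) and blksz = 512,
 * blksz_depth = 128, tx_wmark = 16 and tx_wmark_invers = 16. Scanning
 * mszs[] downwards, 16 is the largest entry dividing both 128 and 16,
 * so msize = 3 (bursts of 16) and rx_wmark = 15.
 */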
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
unsigned int blksz = data->blksz;
const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
u32 fifo_width = 1 << host->data_shift;
u32 blksz_depth = blksz / fifo_width, fifoth_val;
u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
int idx = ARRAY_SIZE(mszs) - 1;
/* PIO should skip this scenario */
if (!host->use_dma)
return;
tx_wmark = (host->fifo_depth) / 2;
tx_wmark_invers = host->fifo_depth - tx_wmark;
/*
* MSIZE is '1',
* if blksz is not a multiple of the FIFO width
*/
if (blksz % fifo_width)
goto done;
do {
if (!((blksz_depth % mszs[idx]) ||
(tx_wmark_invers % mszs[idx]))) {
msize = idx;
rx_wmark = mszs[idx] - 1;
break;
}
} while (--idx > 0);
/*
* If idx is '0', it won't be tried.
* Thus, the initial values are used.
*/
done:
fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
mci_writel(host, FIFOTH, fifoth_val);
}
static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
unsigned int blksz = data->blksz;
u32 blksz_depth, fifo_depth;
u16 thld_size;
u8 enable;
/*
* CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
* in the FIFO region, so we really shouldn't access it).
*/
if (host->verid < DW_MMC_240A ||
(host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
return;
/*
* Card write threshold was introduced in 2.80a.
* It's used when HS400 mode is enabled.
*/
if (data->flags & MMC_DATA_WRITE &&
!(host->timing != MMC_TIMING_MMC_HS400))
return;
if (data->flags & MMC_DATA_WRITE)
enable = SDMMC_CARD_WR_THR_EN;
else
enable = SDMMC_CARD_RD_THR_EN;
if (host->timing != MMC_TIMING_MMC_HS200 &&
host->timing != MMC_TIMING_UHS_SDR104)
goto disable;
blksz_depth = blksz / (1 << host->data_shift);
fifo_depth = host->fifo_depth;
if (blksz_depth > fifo_depth)
goto disable;
/*
* If blksz_depth >= (fifo_depth >> 1), any thld_size <= blksz works.
* If blksz_depth < (fifo_depth >> 1), thld_size must equal blksz.
* Just use blksz in both cases.
*/
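/*
 * E.g. (hypothetical geometry): with a 64-bit FIFO (data_shift = 3),
 * fifo_depth = 128 and blksz = 512, blksz_depth = 64 <= 128, so the
 * read threshold is programmed to 512 bytes for HS200/SDR104.
 */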
thld_size = blksz;
mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
return;
disable:
mci_writel(host, CDTHRCTL, 0);
}
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
unsigned long irqflags;
int sg_len;
u32 temp;
host->using_dma = 0;
/* If we don't have a channel, we can't do DMA */
if (!host->use_dma)
return -ENODEV;
sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
if (sg_len < 0) {
host->dma_ops->stop(host);
return sg_len;
}
host->using_dma = 1;
if (host->use_dma == TRANS_MODE_IDMAC)
dev_vdbg(host->dev,
"sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
(unsigned long)host->sg_cpu,
(unsigned long)host->sg_dma,
sg_len);
/*
* Decide the MSIZE and RX/TX Watermark.
* If the current block size is the same as the previous one,
* there is no need to update FIFOTH.
*/
if (host->prev_blksz != data->blksz)
dw_mci_adjust_fifoth(host, data);
/* Enable the DMA interface */
temp = mci_readl(host, CTRL);
temp |= SDMMC_CTRL_DMA_ENABLE;
mci_writel(host, CTRL, temp);
/* Disable RX/TX IRQs, let DMA handle it */
spin_lock_irqsave(&host->irq_lock, irqflags);
temp = mci_readl(host, INTMASK);
temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
mci_writel(host, INTMASK, temp);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
if (host->dma_ops->start(host, sg_len)) {
host->dma_ops->stop(host);
/* We can't do DMA, try PIO for this one */
dev_dbg(host->dev,
"%s: fall back to PIO mode for current transfer\n",
__func__);
return -ENODEV;
}
return 0;
}
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
unsigned long irqflags;
int flags = SG_MITER_ATOMIC;
u32 temp;
data->error = -EINPROGRESS;
WARN_ON(host->data);
host->sg = NULL;
host->data = data;
if (data->flags & MMC_DATA_READ)
host->dir_status = DW_MCI_RECV_STATUS;
else
host->dir_status = DW_MCI_SEND_STATUS;
dw_mci_ctrl_thld(host, data);
if (dw_mci_submit_data_dma(host, data)) {
if (host->data->flags & MMC_DATA_READ)
flags |= SG_MITER_TO_SG;
else
flags |= SG_MITER_FROM_SG;
sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
host->sg = data->sg;
host->part_buf_start = 0;
host->part_buf_count = 0;
mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
spin_lock_irqsave(&host->irq_lock, irqflags);
temp = mci_readl(host, INTMASK);
temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
mci_writel(host, INTMASK, temp);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
temp = mci_readl(host, CTRL);
temp &= ~SDMMC_CTRL_DMA_ENABLE;
mci_writel(host, CTRL, temp);
/*
* Use the initial fifoth_val for PIO mode. If wm_aligned
* is set, we set the watermark to match the data size.
* If the next transfer may be done in DMA mode,
* prev_blksz should be invalidated.
*/
if (host->wm_aligned)
dw_mci_adjust_fifoth(host, data);
else
mci_writel(host, FIFOTH, host->fifoth_val);
host->prev_blksz = 0;
} else {
/*
* Keep the current block size.
* It will be used to decide whether to update
* fifoth register next time.
*/
host->prev_blksz = data->blksz;
}
}
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
struct dw_mci *host = slot->host;
unsigned int clock = slot->clock;
u32 div;
u32 clk_en_a;
u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;
/* We must continue to set bit 28 in CMD until the change is complete */
if (host->state == STATE_WAITING_CMD11_DONE)
sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
if (!clock) {
mci_writel(host, CLKENA, 0);
mci_send_cmd(slot, sdmmc_cmd_bits, 0);
} else if (clock != host->current_speed || force_clkinit) {
div = host->bus_hz / clock;
if (host->bus_hz % clock && host->bus_hz > clock)
/*
* move the + 1 after the divide to prevent
* over-clocking the card.
*/
div += 1;
div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
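/*
 * E.g. (illustrative numbers): bus_hz = 100 MHz and a requested clock
 * of 400 kHz give div = 125, so the card sees bus_hz / (2 * div) =
 * 400 kHz; a div of 0 bypasses the divider and the card runs at bus_hz.
 */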
if ((clock != slot->__clk_old &&
!test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
force_clkinit) {
/* Silence the verbose log if called from a PM context */
if (!force_clkinit)
dev_info(&slot->mmc->class_dev,
"Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
slot->id, host->bus_hz, clock,
div ? ((host->bus_hz / div) >> 1) :
host->bus_hz, div);
/*
* If the card is being polled, display the message
* only once, at boot time.
*/
if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
slot->mmc->f_min == clock)
set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
}
/* disable clock */
mci_writel(host, CLKENA, 0);
mci_writel(host, CLKSRC, 0);
/* inform CIU */
mci_send_cmd(slot, sdmmc_cmd_bits, 0);
/* set clock to desired speed */
mci_writel(host, CLKDIV, div);
/* inform CIU */
mci_send_cmd(slot, sdmmc_cmd_bits, 0);
/* enable clock; only low power if no SDIO */
clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
mci_writel(host, CLKENA, clk_en_a);
/* inform CIU */
mci_send_cmd(slot, sdmmc_cmd_bits, 0);
/* keep the last clock value that was requested from core */
slot->__clk_old = clock;
}
host->current_speed = clock;
/* Set the current slot bus width */
mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
static void __dw_mci_start_request(struct dw_mci *host,
struct dw_mci_slot *slot,
struct mmc_command *cmd)
{
struct mmc_request *mrq;
struct mmc_data *data;
u32 cmdflags;
mrq = slot->mrq;
host->cur_slot = slot;
host->mrq = mrq;
host->pending_events = 0;
host->completed_events = 0;
host->cmd_status = 0;
host->data_status = 0;
host->dir_status = 0;
data = cmd->data;
if (data) {
mci_writel(host, TMOUT, 0xFFFFFFFF);
mci_writel(host, BYTCNT, data->blksz*data->blocks);
mci_writel(host, BLKSIZ, data->blksz);
}
cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
/* this is the first command, send the initialization clock */
if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
cmdflags |= SDMMC_CMD_INIT;
if (data) {
dw_mci_submit_data(host, data);
wmb(); /* drain writebuffer */
}
dw_mci_start_command(host, cmd, cmdflags);
if (cmd->opcode == SD_SWITCH_VOLTAGE) {
unsigned long irqflags;
/*
* Databook says to fail after 2ms w/ no response, but evidence
* shows that sometimes the cmd11 interrupt takes over 130ms.
* We'll set to 500ms, plus an extra jiffy just in case jiffies
* is just about to roll over.
*
* We do this whole thing under spinlock and only if the
* command hasn't already completed (indicating that the irq
* already ran so we don't want the timeout).
*/
spin_lock_irqsave(&host->irq_lock, irqflags);
if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
mod_timer(&host->cmd11_timer,
jiffies + msecs_to_jiffies(500) + 1);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
static void dw_mci_start_request(struct dw_mci *host,
struct dw_mci_slot *slot)
{
struct mmc_request *mrq = slot->mrq;
struct mmc_command *cmd;
cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
__dw_mci_start_request(host, slot, cmd);
}
/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
struct mmc_request *mrq)
{
dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
host->state);
slot->mrq = mrq;
if (host->state == STATE_WAITING_CMD11_DONE) {
dev_warn(&slot->mmc->class_dev,
"Voltage change didn't complete\n");
/*
* this case isn't expected to happen, so we can
* either crash here or just try to continue on
* in the closest possible state
*/
host->state = STATE_IDLE;
}
if (host->state == STATE_IDLE) {
host->state = STATE_SENDING_CMD;
dw_mci_start_request(host, slot);
} else {
list_add_tail(&slot->queue_node, &host->queue);
}
}
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
WARN_ON(slot->mrq);
/*
* The check for card presence and queueing of the request must be
* atomic, otherwise the card could be removed in between and the
* request wouldn't fail until another card was inserted.
*/
if (!dw_mci_get_cd(mmc)) {
mrq->cmd->error = -ENOMEDIUM;
mmc_request_done(mmc, mrq);
return;
}
spin_lock_bh(&host->lock);
dw_mci_queue_request(host, slot, mrq);
spin_unlock_bh(&host->lock);
}
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
u32 regs;
int ret;
switch (ios->bus_width) {
case MMC_BUS_WIDTH_4:
slot->ctype = SDMMC_CTYPE_4BIT;
break;
case MMC_BUS_WIDTH_8:
slot->ctype = SDMMC_CTYPE_8BIT;
break;
default:
/* set default 1 bit mode */
slot->ctype = SDMMC_CTYPE_1BIT;
}
regs = mci_readl(slot->host, UHS_REG);
/* DDR mode set */
if (ios->timing == MMC_TIMING_MMC_DDR52 ||
ios->timing == MMC_TIMING_UHS_DDR50 ||
ios->timing == MMC_TIMING_MMC_HS400)
regs |= ((0x1 << slot->id) << 16);
else
regs &= ~((0x1 << slot->id) << 16);
mci_writel(slot->host, UHS_REG, regs);
slot->host->timing = ios->timing;
/*
* Use mirror of ios->clock to prevent race with mmc
* core ios update when finding the minimum.
*/
slot->clock = ios->clock;
if (drv_data && drv_data->set_ios)
drv_data->set_ios(slot->host, ios);
switch (ios->power_mode) {
case MMC_POWER_UP:
if (!IS_ERR(mmc->supply.vmmc)) {
ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
ios->vdd);
if (ret) {
dev_err(slot->host->dev,
"failed to enable vmmc regulator\n");
/* return if we failed to turn on vmmc */
return;
}
}
set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
regs = mci_readl(slot->host, PWREN);
regs |= (1 << slot->id);
mci_writel(slot->host, PWREN, regs);
break;
case MMC_POWER_ON:
if (!slot->host->vqmmc_enabled) {
if (!IS_ERR(mmc->supply.vqmmc)) {
ret = regulator_enable(mmc->supply.vqmmc);
if (ret < 0)
dev_err(slot->host->dev,
"failed to enable vqmmc\n");
else
slot->host->vqmmc_enabled = true;
} else {
/* Keep track so we don't reset again */
slot->host->vqmmc_enabled = true;
}
/* Reset our state machine after powering on */
dw_mci_ctrl_reset(slot->host,
SDMMC_CTRL_ALL_RESET_FLAGS);
}
/* Adjust clock / bus width after power is up */
dw_mci_setup_bus(slot, false);
break;
case MMC_POWER_OFF:
/* Turn clock off before power goes down */
dw_mci_setup_bus(slot, false);
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
regulator_disable(mmc->supply.vqmmc);
slot->host->vqmmc_enabled = false;
regs = mci_readl(slot->host, PWREN);
regs &= ~(1 << slot->id);
mci_writel(slot->host, PWREN, regs);
break;
default:
break;
}
if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
slot->host->state = STATE_IDLE;
}
static int dw_mci_card_busy(struct mmc_host *mmc)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
u32 status;
/*
* Check the busy bit which is low when DAT[3:0]
* (the data lines) are 0000
*/
status = mci_readl(slot->host, STATUS);
return !!(status & SDMMC_STATUS_BUSY);
}
static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
const struct dw_mci_drv_data *drv_data = host->drv_data;
u32 uhs;
u32 v18 = SDMMC_UHS_18V << slot->id;
int ret;
if (drv_data && drv_data->switch_voltage)
return drv_data->switch_voltage(mmc, ios);
/*
* Program the voltage. Note that some instances of dw_mmc may use
* the UHS_REG for this. For other instances (like exynos) the UHS_REG
* does no harm but you need to set the regulator directly. Try both.
*/
uhs = mci_readl(host, UHS_REG);
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
uhs &= ~v18;
else
uhs |= v18;
if (!IS_ERR(mmc->supply.vqmmc)) {
ret = mmc_regulator_set_vqmmc(mmc, ios);
if (ret) {
dev_dbg(&mmc->class_dev,
"Regulator set error %d - %s V\n",
ret, uhs & v18 ? "1.8" : "3.3");
return ret;
}
}
mci_writel(host, UHS_REG, uhs);
return 0;
}
static int dw_mci_get_ro(struct mmc_host *mmc)
{
int read_only;
struct dw_mci_slot *slot = mmc_priv(mmc);
int gpio_ro = mmc_gpio_get_ro(mmc);
/* Use platform get_ro function, else try on board write protect */
if (gpio_ro >= 0)
read_only = gpio_ro;
else
read_only =
mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
dev_dbg(&mmc->class_dev, "card is %s\n",
read_only ? "read-only" : "read-write");
return read_only;
}
static void dw_mci_hw_reset(struct mmc_host *mmc)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
int reset;
if (host->use_dma == TRANS_MODE_IDMAC)
dw_mci_idmac_reset(host);
if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
SDMMC_CTRL_FIFO_RESET))
return;
/*
* According to eMMC spec, card reset procedure:
* tRstW >= 1us: RST_n pulse width
* tRSCA >= 200us: RST_n to Command time
* tRSTH >= 1us: RST_n high period
*/
reset = mci_readl(host, RST_N);
reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
mci_writel(host, RST_N, reset);
usleep_range(1, 2);
reset |= SDMMC_RST_HWACTIVE << slot->id;
mci_writel(host, RST_N, reset);
usleep_range(200, 300);
}
static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
/*
* Low power mode will stop the card clock when idle. According to the
* description of the CLKENA register we should disable low power mode
* for SDIO cards if we need SDIO interrupts to work.
*/
if (mmc->caps & MMC_CAP_SDIO_IRQ) {
const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
u32 clk_en_a_old;
u32 clk_en_a;
clk_en_a_old = mci_readl(host, CLKENA);
if (card->type == MMC_TYPE_SDIO ||
card->type == MMC_TYPE_SD_COMBO) {
set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
clk_en_a = clk_en_a_old & ~clken_low_pwr;
} else {
clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
clk_en_a = clk_en_a_old | clken_low_pwr;
}
if (clk_en_a != clk_en_a_old) {
mci_writel(host, CLKENA, clk_en_a);
mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
SDMMC_CMD_PRV_DAT_WAIT, 0);
}
}
}
static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
{
struct dw_mci *host = slot->host;
unsigned long irqflags;
u32 int_mask;
spin_lock_irqsave(&host->irq_lock, irqflags);
/* Enable/disable Slot Specific SDIO interrupt */
int_mask = mci_readl(host, INTMASK);
if (enb)
int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
else
int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
mci_writel(host, INTMASK, int_mask);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
__dw_mci_enable_sdio_irq(slot, enb);
/* Avoid runtime suspending the device when SDIO IRQ is enabled */
if (enb)
pm_runtime_get_noresume(host->dev);
else
pm_runtime_put_noidle(host->dev);
}
static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
__dw_mci_enable_sdio_irq(slot, 1);
}
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
const struct dw_mci_drv_data *drv_data = host->drv_data;
int err = -EINVAL;
if (drv_data && drv_data->execute_tuning)
err = drv_data->execute_tuning(slot, opcode);
return err;
}
static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
struct mmc_ios *ios)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
const struct dw_mci_drv_data *drv_data = host->drv_data;
if (drv_data && drv_data->prepare_hs400_tuning)
return drv_data->prepare_hs400_tuning(host, ios);
return 0;
}
static bool dw_mci_reset(struct dw_mci *host)
{
u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
bool ret = false;
u32 status = 0;
/*
* Resetting generates a block interrupt, hence setting
* the scatter-gather pointer to NULL.
*/
if (host->sg) {
sg_miter_stop(&host->sg_miter);
host->sg = NULL;
}
if (host->use_dma)
flags |= SDMMC_CTRL_DMA_RESET;
if (dw_mci_ctrl_reset(host, flags)) {
/*
* In all cases we clear the RAWINTS
* register to clear any interrupts.
*/
mci_writel(host, RINTSTS, 0xFFFFFFFF);
if (!host->use_dma) {
ret = true;
goto ciu_out;
}
/* Wait for dma_req to be cleared */
if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
status,
!(status & SDMMC_STATUS_DMA_REQ),
1, 500 * USEC_PER_MSEC)) {
dev_err(host->dev,
"%s: Timeout waiting for dma_req to be cleared\n",
__func__);
goto ciu_out;
}
/* when using DMA next we reset the fifo again */
if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
goto ciu_out;
} else {
/* if the controller reset bit did clear, then set clock regs */
if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
dev_err(host->dev,
"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
__func__);
goto ciu_out;
}
}
if (host->use_dma == TRANS_MODE_IDMAC)
/* It is also recommended that we reset and reprogram idmac */
dw_mci_idmac_reset(host);
ret = true;
ciu_out:
/* After a CTRL reset we need to have CIU set clock registers */
mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);
return ret;
}
static const struct mmc_host_ops dw_mci_ops = {
.request = dw_mci_request,
.pre_req = dw_mci_pre_req,
.post_req = dw_mci_post_req,
.set_ios = dw_mci_set_ios,
.get_ro = dw_mci_get_ro,
.get_cd = dw_mci_get_cd,
.hw_reset = dw_mci_hw_reset,
.enable_sdio_irq = dw_mci_enable_sdio_irq,
.ack_sdio_irq = dw_mci_ack_sdio_irq,
.execute_tuning = dw_mci_execute_tuning,
.card_busy = dw_mci_card_busy,
.start_signal_voltage_switch = dw_mci_switch_voltage,
.init_card = dw_mci_init_card,
.prepare_hs400_tuning = dw_mci_prepare_hs400_tuning,
};
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
__releases(&host->lock)
__acquires(&host->lock)
{
struct dw_mci_slot *slot;
struct mmc_host *prev_mmc = host->cur_slot->mmc;
WARN_ON(host->cmd || host->data);
host->cur_slot->mrq = NULL;
host->mrq = NULL;
if (!list_empty(&host->queue)) {
slot = list_entry(host->queue.next,
struct dw_mci_slot, queue_node);
list_del(&slot->queue_node);
dev_vdbg(host->dev, "list not empty: %s is next\n",
mmc_hostname(slot->mmc));
host->state = STATE_SENDING_CMD;
dw_mci_start_request(host, slot);
} else {
dev_vdbg(host->dev, "list empty\n");
if (host->state == STATE_SENDING_CMD11)
host->state = STATE_WAITING_CMD11_DONE;
else
host->state = STATE_IDLE;
}
spin_unlock(&host->lock);
mmc_request_done(prev_mmc, mrq);
spin_lock(&host->lock);
}
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
u32 status = host->cmd_status;
host->cmd_status = 0;
/* Read the response from the card (up to 16 bytes) */
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
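/*
 * The mmc core expects resp[0] to hold the most significant word
 * of a 136-bit response, while the controller keeps bits 127:96
 * in RESP3, so the registers are read in reverse order.
 */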
cmd->resp[3] = mci_readl(host, RESP0);
cmd->resp[2] = mci_readl(host, RESP1);
cmd->resp[1] = mci_readl(host, RESP2);
cmd->resp[0] = mci_readl(host, RESP3);
} else {
cmd->resp[0] = mci_readl(host, RESP0);
cmd->resp[1] = 0;
cmd->resp[2] = 0;
cmd->resp[3] = 0;
}
}
if (status & SDMMC_INT_RTO)
cmd->error = -ETIMEDOUT;
else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
cmd->error = -EILSEQ;
else if (status & SDMMC_INT_RESP_ERR)
cmd->error = -EIO;
else
cmd->error = 0;
return cmd->error;
}
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
u32 status = host->data_status;
if (status & DW_MCI_DATA_ERROR_FLAGS) {
if (status & SDMMC_INT_DRTO) {
data->error = -ETIMEDOUT;
} else if (status & SDMMC_INT_DCRC) {
data->error = -EILSEQ;
} else if (status & SDMMC_INT_EBE) {
if (host->dir_status ==
DW_MCI_SEND_STATUS) {
/*
* No data CRC status was returned.
* The number of bytes transferred
* will be exaggerated in PIO mode.
*/
data->bytes_xfered = 0;
data->error = -ETIMEDOUT;
} else if (host->dir_status ==
DW_MCI_RECV_STATUS) {
data->error = -EILSEQ;
}
} else {
/* SDMMC_INT_SBE is included */
data->error = -EILSEQ;
}
dev_dbg(host->dev, "data error, status 0x%08x\n", status);
/*
* After an error, there may be data lingering
* in the FIFO
*/
dw_mci_reset(host);
} else {
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
}
return data->error;
}
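/*
 * Arm the data read timeout (DRTO) software timer: the timeout value
 * programmed in the upper bits of the TMOUT register is converted from
 * card clock cycles to milliseconds using bus_hz, with a little slack
 * added on top.
 */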
static void dw_mci_set_drto(struct dw_mci *host)
{
unsigned int drto_clks;
unsigned int drto_ms;
drto_clks = mci_readl(host, TMOUT) >> 8;
drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000);
/* add a bit of spare time */
drto_ms += 10;
mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
}
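/*
 * Main request state machine, run as a tasklet. It consumes the event
 * bits set by the interrupt handler (pending_events) and walks the
 * current request through the command, data, busy and stop phases
 * under host->lock.
 */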
static void dw_mci_tasklet_func(unsigned long priv)
{
struct dw_mci *host = (struct dw_mci *)priv;
struct mmc_data *data;
struct mmc_command *cmd;
struct mmc_request *mrq;
enum dw_mci_state state;
enum dw_mci_state prev_state;
unsigned int err;
spin_lock(&host->lock);
state = host->state;
data = host->data;
mrq = host->mrq;
do {
prev_state = state;
switch (state) {
case STATE_IDLE:
case STATE_WAITING_CMD11_DONE:
break;
case STATE_SENDING_CMD11:
case STATE_SENDING_CMD:
if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
&host->pending_events))
break;
cmd = host->cmd;
host->cmd = NULL;
set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
err = dw_mci_command_complete(host, cmd);
if (cmd == mrq->sbc && !err) {
prev_state = state = STATE_SENDING_CMD;
__dw_mci_start_request(host, host->cur_slot,
mrq->cmd);
goto unlock;
}
if (cmd->data && err) {
/*
* During UHS tuning sequence, sending the stop
* command after the response CRC error would
* throw the system into a confused state
* causing all future tuning phases to report
* failure.
*
* In such case controller will move into a data
* transfer state after a response error or
* response CRC error. Let's let that finish
* before trying to send a stop, so we'll go to
* STATE_SENDING_DATA.
*
* Although letting the data transfer take place
* will waste a bit of time (we already know
* the command was bad), it can't cause any
* errors since it's possible it would have
* taken place anyway if this tasklet got
* delayed. Allowing the transfer to take place
* avoids races and keeps things simple.
*/
if ((err != -ETIMEDOUT) &&
(cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
state = STATE_SENDING_DATA;
continue;
}
dw_mci_stop_dma(host);
send_stop_abort(host, data);
state = STATE_SENDING_STOP;
break;
}
if (!cmd->data || err) {
dw_mci_request_end(host, mrq);
goto unlock;
}
prev_state = state = STATE_SENDING_DATA;
/* fall through */
case STATE_SENDING_DATA:
/*
* We could get a data error and never a transfer
* complete so we'd better check for it here.
*
* Note that we don't really care if we also got a
* transfer complete; stopping the DMA and sending an
* abort won't hurt.
*/
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
dw_mci_stop_dma(host);
if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
state = STATE_DATA_ERROR;
break;
}
if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
&host->pending_events)) {
/*
 * If all data-related interrupts don't arrive within
 * the given time while in the reading data state, arm
 * the data read timeout timer.
 */
if (host->dir_status == DW_MCI_RECV_STATUS)
dw_mci_set_drto(host);
break;
}
set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
/*
* Handle an EVENT_DATA_ERROR that might have shown up
* before the transfer completed. This might not have
* been caught by the check above because the interrupt
* could have gone off between the previous check and
* the check for transfer complete.
*
* Technically this ought not be needed assuming we
* get a DATA_COMPLETE eventually (we'll notice the
* error and end the request), but it shouldn't hurt.
*
* This has the advantage of sending the stop command.
*/
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
dw_mci_stop_dma(host);
if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
state = STATE_DATA_ERROR;
break;
}
prev_state = state = STATE_DATA_BUSY;
/* fall through */
case STATE_DATA_BUSY:
if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
&host->pending_events)) {
/*
 * If the data error interrupt has come but the data over
 * interrupt doesn't arrive within the given time while in
 * the reading data state, arm the data read timeout timer.
 */
if (host->dir_status == DW_MCI_RECV_STATUS)
dw_mci_set_drto(host);
break;
}
host->data = NULL;
set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
err = dw_mci_data_complete(host, data);
if (!err) {
if (!data->stop || mrq->sbc) {
if (mrq->sbc && data->stop)
data->stop->error = 0;
dw_mci_request_end(host, mrq);
goto unlock;
}
/* stop command for open-ended transfer */
if (data->stop)
send_stop_abort(host, data);
} else {
/*
* If we don't have a command complete now we'll
* never get one since we just reset everything;
* better end the request.
*
* If we do have a command complete we'll fall
* through to the SENDING_STOP command and
* everything will be peachy keen.
*/
if (!test_bit(EVENT_CMD_COMPLETE,
&host->pending_events)) {
host->cmd = NULL;
dw_mci_request_end(host, mrq);
goto unlock;
}
}
/*
 * If err is non-zero, the stop/abort command has
 * already been issued.
 */
prev_state = state = STATE_SENDING_STOP;
/* fall through */
case STATE_SENDING_STOP:
if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
&host->pending_events))
break;
/* CMD error in data command */
if (mrq->cmd->error && mrq->data)
dw_mci_reset(host);
host->cmd = NULL;
host->data = NULL;
if (!mrq->sbc && mrq->stop)
dw_mci_command_complete(host, mrq->stop);
else
host->cmd_status = 0;
dw_mci_request_end(host, mrq);
goto unlock;
case STATE_DATA_ERROR:
if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
&host->pending_events))
break;
state = STATE_DATA_BUSY;
break;
}
} while (state != prev_state);
host->state = state;
unlock:
spin_unlock(&host->lock);
}
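/*
 * PIO helpers. part_buf stages a partial FIFO word (fewer bytes than
 * the 1 << data_shift byte FIFO width) so that buffers whose length is
 * not a multiple of the FIFO width can still be pushed to or pulled
 * from the FIFO one full word at a time.
 */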
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
memcpy((void *)&host->part_buf, buf, cnt);
host->part_buf_count = cnt;
}
/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
host->part_buf_count += cnt;
return cnt;
}
/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
cnt = min_t(int, cnt, host->part_buf_count);
if (cnt) {
memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
cnt);
host->part_buf_count -= cnt;
host->part_buf_start += cnt;
}
return cnt;
}
/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
memcpy(buf, &host->part_buf, cnt);
host->part_buf_start = cnt;
host->part_buf_count = (1 << host->data_shift) - cnt;
}
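/*
 * Push data to the FIFO in 16-bit words: flush any previously staged
 * partial word, bounce misaligned buffers through a small aligned
 * buffer on architectures without efficient unaligned access, and
 * stage any trailing odd byte in part_buf, flushing it once the full
 * expected transfer length has been pushed.
 */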
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
struct mmc_data *data = host->data;
int init_cnt = cnt;
/* try and push anything in the part_buf */
if (unlikely(host->part_buf_count)) {
int len = dw_mci_push_part_bytes(host, buf, cnt);
buf += len;
cnt -= len;
if (host->part_buf_count == 2) {
mci_fifo_writew(host->fifo_reg, host->part_buf16);
host->part_buf_count = 0;
}
}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (unlikely((unsigned long)buf & 0x1)) {
while (cnt >= 2) {
u16 aligned_buf[64];
int len = min(cnt & -2, (int)sizeof(aligned_buf));
int items = len >> 1;
int i;
/* memcpy from input buffer into aligned buffer */
memcpy(aligned_buf, buf, len);
buf += len;
cnt -= len;
/* push data from aligned buffer into fifo */
for (i = 0; i < items; ++i)
mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
}
} else
#endif
{
u16 *pdata = buf;
for (; cnt >= 2; cnt -= 2)
mci_fifo_writew(host->fifo_reg, *pdata++);
buf = pdata;
}
/* put anything remaining in the part_buf */
if (cnt) {
dw_mci_set_part_bytes(host, buf, cnt);
/* Push data if we have reached the expected data length */
if ((data->bytes_xfered + init_cnt) ==
(data->blksz * data->blocks))
mci_fifo_writew(host->fifo_reg, host->part_buf16);
}
}
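/*
 * Pull data from the FIFO in 16-bit words, mirroring the push path;
 * a final partial word is read into part_buf16 and handed out via
 * dw_mci_pull_final_bytes().
 */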
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (unlikely((unsigned long)buf & 0x1)) {
while (cnt >= 2) {
/* pull data from fifo into aligned buffer */
u16 aligned_buf[64];
int len = min(cnt & -2, (int)sizeof(aligned_buf));
int items = len >> 1;
int i;
for (i = 0; i < items; ++i)
aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
/* memcpy from aligned buffer into output buffer */
memcpy(buf, aligned_buf, len);
buf += len;
cnt -= len;
}
} else
#endif
{
u16 *pdata = buf;
for (; cnt >= 2; cnt -= 2)
*pdata++ = mci_fifo_readw(host->fifo_reg);
buf = pdata;
}
if (cnt) {
host->part_buf16 = mci_fifo_readw(host->fifo_reg);
dw_mci_pull_final_bytes(host, buf, cnt);
}
}
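/* As dw_mci_push_data16(), but with 32-bit FIFO accesses. */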
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
struct mmc_data *data = host->data;
int init_cnt = cnt;
/* try and push anything in the part_buf */
if (unlikely(host->part_buf_count)) {
int len = dw_mci_push_part_bytes(host, buf, cnt);
buf += len;
cnt -= len;
if (host->part_buf_count == 4) {
mci_fifo_writel(host->fifo_reg, host->part_buf32);
host->part_buf_count = 0;
}
}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (unlikely((unsigned long)buf & 0x3)) {
while (cnt >= 4) {
u32 aligned_buf[32];
int len = min(cnt & -4, (int)sizeof(aligned_buf));
int items = len >> 2;
int i;
/* memcpy from input buffer into aligned buffer */
memcpy(aligned_buf, buf, len);
buf += len;
cnt -= len;
/* push data from aligned buffer into fifo */
for (i = 0; i < items; ++i)
mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
}
} else
#endif
{
u32 *pdata = buf;
for (; cnt >= 4; cnt -= 4)
mci_fifo_writel(host->fifo_reg, *pdata++);
buf = pdata;
}
/* put anything remaining in the part_buf */
if (cnt) {
dw_mci_set_part_bytes(host, buf, cnt);
/* Push data if we have reached the expected data length */
if ((data->bytes_xfered + init_cnt) ==
(data->blksz * data->blocks))
mci_fifo_writel(host->fifo_reg, host->part_buf32);
}
}
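/* As dw_mci_pull_data16(), but with 32-bit FIFO accesses. */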
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (unlikely((unsigned long)buf & 0x3)) {
while (cnt >= 4) {
/* pull data from fifo into aligned buffer */
u32 aligned_buf[32];
int len = min(cnt & -4, (int)sizeof(aligned_buf));
int items = len >> 2;
int i;
for (i = 0; i < items; ++i)
aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
/* memcpy from aligned buffer into output buffer */
memcpy(buf, aligned_buf, len);
buf += len;
cnt -= len;
}
} else
#endif
{
u32 *pdata = buf;
for (; cnt >= 4; cnt -= 4)
*pdata++ = mci_fifo_readl(host->fifo_reg);
buf = pdata;
}
if (cnt) {
host->part_buf32 = mci_fifo_readl(host->fifo_reg);
dw_mci_pull_final_bytes(host, buf, cnt);
}
}
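/* As dw_mci_push_data16(), but with 64-bit FIFO accesses. */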
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
struct mmc_data *data = host->data;
int init_cnt = cnt;
/* try and push anything in the part_buf */
if (unlikely(host->part_buf_count)) {
int len = dw_mci_push_part_bytes(host, buf, cnt);
buf += len;
cnt -= len;
if (host->part_buf_count == 8) {
mci_fifo_writeq(host->fifo_reg, host->part_buf);
host->part_buf_count = 0;
}
}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (unlikely((unsigned long)buf & 0x7)) {
while (cnt >= 8) {
u64 aligned_buf[16];
int len = min(cnt & -8, (int)sizeof(aligned_buf));
int items = len >> 3;
int i;
/* memcpy from input buffer into aligned buffer */
memcpy(aligned_buf, buf, len);
buf += len;
cnt -= len;
/* push data from aligned buffer into fifo */
for (i = 0; i < items; ++i)
mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
}
} else
#endif
{
u64 *pdata = buf;
for (; cnt >= 8; cnt -= 8)
mci_fifo_writeq(host->fifo_reg, *pdata++);
buf = pdata;
}
/* put anything remaining in the part_buf */
if (cnt) {
dw_mci_set_part_bytes(host, buf, cnt);
/* Push data if we have reached the expected data length */
if ((data->bytes_xfered + init_cnt) ==
(data->blksz * data->blocks))
mci_fifo_writeq(host->fifo_reg, host->part_buf);
}
}
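/* As dw_mci_pull_data16(), but with 64-bit FIFO accesses. */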
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (unlikely((unsigned long)buf & 0x7)) {
while (cnt >= 8) {
/* pull data from fifo into aligned buffer */
u64 aligned_buf[16];
int len = min(cnt & -8, (int)sizeof(aligned_buf));
int items = len >> 3;
int i;
for (i = 0; i < items; ++i)
aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
/* memcpy from aligned buffer into output buffer */
memcpy(buf, aligned_buf, len);
buf += len;
cnt -= len;
}
} else
#endif
{
u64 *pdata = buf;
for (; cnt >= 8; cnt -= 8)
*pdata++ = mci_fifo_readq(host->fifo_reg);
buf = pdata;
}
if (cnt) {
host->part_buf = mci_fifo_readq(host->fifo_reg);
dw_mci_pull_final_bytes(host, buf, cnt);
}
}
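/*
 * Generic pull entry point: drain any bytes left over in part_buf
 * first, then let the width-specific pull_data callback read the rest
 * straight from the FIFO.
 */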
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
int len;
/* get remaining partial bytes */
len = dw_mci_pull_part_bytes(host, buf, cnt);
if (unlikely(len == cnt))
return;
buf += len;
cnt -= len;
/* get the rest of the data */
host->pull_data(host, buf, cnt);
}
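/*
 * PIO read path. Walk the request's scatterlist with sg_miter and, for
 * each segment, pull as many bytes as the FIFO currently holds (FCNT
 * from the STATUS register, plus any staged partial word). Keep going
 * while RXDR stays asserted and, when called for data-over (dto), until
 * the FIFO is completely drained; once the scatterlist is exhausted,
 * flag EVENT_XFER_COMPLETE.
 */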
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
struct sg_mapping_iter *sg_miter = &host->sg_miter;
void *buf;
unsigned int offset;
struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
unsigned int len;
unsigned int remain, fcnt;
do {
if (!sg_miter_next(sg_miter))
goto done;
host->sg = sg_miter->piter.sg;
buf = sg_miter->addr;
remain = sg_miter->length;
offset = 0;
do {
fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
<< shift) + host->part_buf_count;
len = min(remain, fcnt);
if (!len)
break;
dw_mci_pull_data(host, (void *)(buf + offset), len);
data->bytes_xfered += len;
offset += len;
remain -= len;
} while (remain);
sg_miter->consumed = offset;
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
/* if RXDR is ready, read again */
} while ((status & SDMMC_INT_RXDR) ||
(dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
if (!remain) {
if (!sg_miter_next(sg_miter))
goto done;
sg_miter->consumed = 0;
}
sg_miter_stop(sg_miter);
return;
done:
sg_miter_stop(sg_miter);
host->sg = NULL;
smp_wmb(); /* drain writebuffer */
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
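/*
 * PIO write path: the transmit-side counterpart of
 * dw_mci_read_data_pio(), filling the FIFO from the scatterlist
 * according to the free FIFO space (fifo_depth minus the fill level
 * reported in the STATUS register).
 */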
static void dw_mci_write_data_pio(struct dw_mci *host)
{
struct sg_mapping_iter *sg_miter = &host->sg_miter;
void *buf;
unsigned int offset;
struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
unsigned int len;
unsigned int fifo_depth = host->fifo_depth;
unsigned int remain, fcnt;
do {
if (!sg_miter_next(sg_miter))
goto done;
host->sg = sg_miter->piter.sg;
buf = sg_miter->addr;
remain = sg_miter->length;
offset = 0;
do {
fcnt = ((fifo_depth -
SDMMC_GET_FCNT(mci_readl(host, STATUS)))
<< shift) - host->part_buf_count;
len = min(remain, fcnt);
if (!len)
break;
host->push_data(host, (void *)(buf + offset), len);
data->bytes_xfered += len;
offset += len;
remain -= len;
} while (remain);
sg_miter->consumed = offset;
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
if (!remain) {
if (!sg_miter_next(sg_miter))
goto done;
sg_miter->consumed = 0;
}
sg_miter_stop(sg_miter);
return;
done:
sg_miter_stop(sg_miter);
host->sg = NULL;
smp_wmb(); /* drain writebuffer */
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
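/*
 * Latch the raw command status (the first error seen wins) and signal
 * command completion to the state-machine tasklet.
 */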
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
if (!host->cmd_status)
host->cmd_status = status;
smp_wmb(); /* drain writebuffer */
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
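/*
 * Card-detect bottom half: notify the core of a possible card change on
 * every slot and let mmc_detect_change() debounce it using the
 * platform's detect_delay_ms.
 */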
static void dw_mci_handle_cd(struct dw_mci *host)
{
int i;
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
if (!slot)
continue;
if (slot->mmc->ops->card_event)
slot->mmc->ops->card_event(slot->mmc);
mmc_detect_change(slot->mmc,
msecs_to_jiffies(host->pdata->detect_delay_ms));
}
}
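/*
 * Top-level interrupt handler.  MINTSTS is sampled once and each pending
 * source is acknowledged in RINTSTS as it is handled: voltage-switch
 * completion for CMD11, command/data errors, data-over, RXDR/TXDR for
 * PIO transfers, command done, card detect and per-slot SDIO interrupts.
 * IDMAC status (IDSTS/IDSTS64) is checked afterwards when the internal
 * DMA controller is in use.
 */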
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
struct dw_mci *host = dev_id;
u32 pending;
int i;
pending = mci_readl(host, MINTSTS); /* read-only mask reg */
if (pending) {
/* Check volt switch first, since it can look like an error */
if ((host->state == STATE_SENDING_CMD11) &&
(pending & SDMMC_INT_VOLT_SWITCH)) {
unsigned long irqflags;
mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
pending &= ~SDMMC_INT_VOLT_SWITCH;
/*
* Hold the lock; we know cmd11_timer can't be kicked
* off after the lock is released, so safe to delete.
*/
spin_lock_irqsave(&host->irq_lock, irqflags);
dw_mci_cmd_interrupt(host, pending);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
del_timer(&host->cmd11_timer);
}
if (pending & DW_MCI_CMD_ERROR_FLAGS) {
mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
host->cmd_status = pending;
smp_wmb(); /* drain writebuffer */
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
}
if (pending & DW_MCI_DATA_ERROR_FLAGS) {
/* if there is an error report DATA_ERROR */
mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
host->data_status = pending;
smp_wmb(); /* drain writebuffer */
set_bit(EVENT_DATA_ERROR, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
if (pending & SDMMC_INT_DATA_OVER) {
del_timer(&host->dto_timer);
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
if (!host->data_status)
host->data_status = pending;
smp_wmb(); /* drain writebuffer */
if (host->dir_status == DW_MCI_RECV_STATUS) {
if (host->sg != NULL)
dw_mci_read_data_pio(host, true);
}
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
if (pending & SDMMC_INT_RXDR) {
mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
dw_mci_read_data_pio(host, false);
}
if (pending & SDMMC_INT_TXDR) {
mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
dw_mci_write_data_pio(host);
}
if (pending & SDMMC_INT_CMD_DONE) {
mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
dw_mci_cmd_interrupt(host, pending);
}
if (pending & SDMMC_INT_CD) {
mci_writel(host, RINTSTS, SDMMC_INT_CD);
dw_mci_handle_cd(host);
}
/* Handle SDIO Interrupts */
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
if (!slot)
continue;
if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
mci_writel(host, RINTSTS,
SDMMC_INT_SDIO(slot->sdio_id));
__dw_mci_enable_sdio_irq(slot, 0);
sdio_signal_irq(slot->mmc);
}
}
}
if (host->use_dma != TRANS_MODE_IDMAC)
return IRQ_HANDLED;
/* Handle IDMA interrupts */
if (host->dma_64bit_address == 1) {
pending = mci_readl(host, IDSTS64);
if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
SDMMC_IDMAC_INT_RI);
mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
host->dma_ops->complete((void *)host);
}
} else {
pending = mci_readl(host, IDSTS);
if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
SDMMC_IDMAC_INT_RI);
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
host->dma_ops->complete((void *)host);
}
}
return IRQ_HANDLED;
}
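/*
 * Allocate and register one mmc_host for slot @id: set up the ops and
 * clock limits, pick up platform/DT capabilities, size the request
 * limits for the selected transfer mode (IDMAC/EDMAC/PIO) and add the
 * host to the core.
 */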
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
struct mmc_host *mmc;
struct dw_mci_slot *slot;
const struct dw_mci_drv_data *drv_data = host->drv_data;
int ctrl_id, ret;
u32 freq[2];
mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
if (!mmc)
return -ENOMEM;
slot = mmc_priv(mmc);
slot->id = id;
slot->sdio_id = host->sdio_id0 + id;
slot->mmc = mmc;
slot->host = host;
host->slot[id] = slot;
mmc->ops = &dw_mci_ops;
if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
freq, 2)) {
mmc->f_min = DW_MCI_FREQ_MIN;
mmc->f_max = DW_MCI_FREQ_MAX;
} else {
dev_info(host->dev,
"'clock-freq-min-max' property was deprecated.\n");
mmc->f_min = freq[0];
mmc->f_max = freq[1];
}
/*if there are external regulators, get them*/
ret = mmc_regulator_get_supply(mmc);
if (ret == -EPROBE_DEFER)
goto err_host_allocated;
if (!mmc->ocr_avail)
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
if (host->pdata->caps)
mmc->caps = host->pdata->caps;
/*
* Support MMC_CAP_ERASE by default.
* It needs to use trim/discard/erase commands.
*/
mmc->caps |= MMC_CAP_ERASE;
if (host->pdata->pm_caps)
mmc->pm_caps = host->pdata->pm_caps;
if (host->dev->of_node) {
ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
if (ctrl_id < 0)
ctrl_id = 0;
} else {
ctrl_id = to_platform_device(host->dev)->id;
}
if (drv_data && drv_data->caps)
mmc->caps |= drv_data->caps[ctrl_id];
if (host->pdata->caps2)
mmc->caps2 = host->pdata->caps2;
ret = mmc_of_parse(mmc);
if (ret)
goto err_host_allocated;
/* Process SDIO IRQs through the sdio_irq_work. */
if (mmc->caps & MMC_CAP_SDIO_IRQ)
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
/* Useful defaults if platform data is unset. */
if (host->use_dma == TRANS_MODE_IDMAC) {
mmc->max_segs = host->ring_size;
mmc->max_blk_size = 65535;
mmc->max_seg_size = 0x1000;
mmc->max_req_size = mmc->max_seg_size * host->ring_size;
mmc->max_blk_count = mmc->max_req_size / 512;
} else if (host->use_dma == TRANS_MODE_EDMAC) {
mmc->max_segs = 64;
mmc->max_blk_size = 65535;
mmc->max_blk_count = 65535;
mmc->max_req_size =
mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
} else {
/* TRANS_MODE_PIO */
mmc->max_segs = 64;
mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
mmc->max_blk_count = 512;
mmc->max_req_size = mmc->max_blk_size *
mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
}
dw_mci_get_cd(mmc);
ret = mmc_add_host(mmc);
if (ret)
goto err_host_allocated;
#if defined(CONFIG_DEBUG_FS)
dw_mci_init_debugfs(slot);
#endif
return 0;
err_host_allocated:
mmc_free_host(mmc);
return ret;
}
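/* Undo dw_mci_init_slot(): remove and free the mmc_host for this slot. */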
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
/* Debugfs stuff is cleaned up by mmc core */
mmc_remove_host(slot->mmc);
slot->host->slot[id] = NULL;
mmc_free_host(slot->mmc);
}
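/*
 * Probe HCON to choose a transfer mode (internal IDMAC, external DMA or
 * PIO), allocate the descriptor ring for IDMAC and hook up dma_ops.
 * Falls back to PIO whenever a required resource or callback is missing.
 */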
static void dw_mci_init_dma(struct dw_mci *host)
{
int addr_config;
struct device *dev = host->dev;
/*
* Check transfer mode from HCON[17:16]
* This clears up the ambiguous description in the dw_mmc databook:
* 2b'00: No DMA Interface -> Actually means using Internal DMA block
* 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
* 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
* 2b'11: Non DW DMA Interface -> pio only
* Compared to DesignWare DMA Interface, Generic DMA Interface has a
* simpler request/acknowledge handshake mechanism and both of them
* are regarded as external DMA masters by dw_mmc.
*/
host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
if (host->use_dma == DMA_INTERFACE_IDMA) {
host->use_dma = TRANS_MODE_IDMAC;
} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
host->use_dma == DMA_INTERFACE_GDMA) {
host->use_dma = TRANS_MODE_EDMAC;
} else {
goto no_dma;
}
/* Determine which DMA interface to use */
if (host->use_dma == TRANS_MODE_IDMAC) {
/*
* Check ADDR_CONFIG bit in HCON to find
* IDMAC address bus width
*/
addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
if (addr_config == 1) {
/* host supports IDMAC in 64-bit address mode */
host->dma_64bit_address = 1;
dev_info(host->dev,
"IDMAC supports 64-bit address mode.\n");
if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
dma_set_coherent_mask(host->dev,
DMA_BIT_MASK(64));
} else {
/* host supports IDMAC in 32-bit address mode */
host->dma_64bit_address = 0;
dev_info(host->dev,
"IDMAC supports 32-bit address mode.\n");
}
/* Alloc memory for sg translation */
host->sg_cpu = dmam_alloc_coherent(host->dev,
DESC_RING_BUF_SZ,
&host->sg_dma, GFP_KERNEL);
if (!host->sg_cpu) {
dev_err(host->dev,
"%s: could not alloc DMA memory\n",
__func__);
goto no_dma;
}
host->dma_ops = &dw_mci_idmac_ops;
dev_info(host->dev, "Using internal DMA controller.\n");
} else {
/* TRANS_MODE_EDMAC: check dma bindings again */
if ((device_property_read_string_array(dev, "dma-names",
NULL, 0) < 0) ||
!device_property_present(dev, "dmas")) {
goto no_dma;
}
host->dma_ops = &dw_mci_edmac_ops;
dev_info(host->dev, "Using external DMA controller.\n");
}
if (host->dma_ops->init && host->dma_ops->start &&
host->dma_ops->stop && host->dma_ops->cleanup) {
if (host->dma_ops->init(host)) {
dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
__func__);
goto no_dma;
}
} else {
dev_err(host->dev, "DMA initialization not found.\n");
goto no_dma;
}
return;
no_dma:
dev_info(host->dev, "Using PIO mode.\n");
host->use_dma = TRANS_MODE_PIO;
}
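/*
 * CMD11 (voltage switch) guard timer: if the expected interrupt never
 * arrives, fake a response timeout so the state machine can recover.
 */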
static void dw_mci_cmd11_timer(unsigned long arg)
{
struct dw_mci *host = (struct dw_mci *)arg;
if (host->state != STATE_SENDING_CMD11) {
dev_warn(host->dev, "Unexpected CMD11 timeout\n");
return;
}
host->cmd_status = SDMMC_INT_RTO;
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
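/*
 * Data-over timeout guard: if DTO never fires while data is in flight,
 * report a data read timeout (DRTO) so the request completes with an
 * error instead of hanging forever.
 */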
static void dw_mci_dto_timer(unsigned long arg)
{
struct dw_mci *host = (struct dw_mci *)arg;
switch (host->state) {
case STATE_SENDING_DATA:
case STATE_DATA_BUSY:
/*
* If DTO interrupt does NOT come in sending data state,
* we should notify the driver to terminate current transfer
* and report a data timeout to the core.
*/
host->data_status = SDMMC_INT_DRTO;
set_bit(EVENT_DATA_ERROR, &host->pending_events);
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
break;
default:
break;
}
}
#ifdef CONFIG_OF
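/*
 * Build dw_mci_board platform data from device properties (reset line,
 * num-slots, fifo-depth, card-detect-delay, clock-frequency, ...) and
 * give the variant driver a chance to parse its own bindings.
 */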
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
struct dw_mci_board *pdata;
struct device *dev = host->dev;
const struct dw_mci_drv_data *drv_data = host->drv_data;
int ret;
u32 clock_frequency;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
/* find reset controller when exist */
pdata->rstc = devm_reset_control_get_optional(dev, "reset");
if (IS_ERR(pdata->rstc)) {
if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
return ERR_PTR(-EPROBE_DEFER);
}
/* find out number of slots supported */
device_property_read_u32(dev, "num-slots", &pdata->num_slots);
if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
dev_info(dev,
"fifo-depth property not found, using value of FIFOTH register as default\n");
device_property_read_u32(dev, "card-detect-delay",
&pdata->detect_delay_ms);
device_property_read_u32(dev, "data-addr", &host->data_addr_override);
if (device_property_present(dev, "fifo-watermark-aligned"))
host->wm_aligned = true;
if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
pdata->bus_hz = clock_frequency;
if (drv_data && drv_data->parse_dt) {
ret = drv_data->parse_dt(host);
if (ret)
return ERR_PTR(ret);
}
return pdata;
}
#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
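/* Unmask the controller's card-detect interrupt if any slot needs it. */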
static void dw_mci_enable_cd(struct dw_mci *host)
{
unsigned long irqflags;
u32 temp;
int i;
struct dw_mci_slot *slot;
/*
* No need for CD if every slot has a working CD GPIO or relies on
* polling (MMC_CAP_NEEDS_POLL) for card detection.
*/
for (i = 0; i < host->num_slots; i++) {
slot = host->slot[i];
if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
return;
if (mmc_gpio_get_cd(slot->mmc) < 0)
break;
}
if (i == host->num_slots)
return;
spin_lock_irqsave(&host->irq_lock, irqflags);
temp = mci_readl(host, INTMASK);
temp |= SDMMC_INT_CD;
mci_writel(host, INTMASK, temp);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
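/*
 * Common probe path shared by the platform and PCI glue drivers: parse
 * platform data, enable the BIU/CIU clocks, reset the controller, set up
 * DMA, FIFO thresholds and interrupts, then register the slots.
 */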
int dw_mci_probe(struct dw_mci *host)
{
const struct dw_mci_drv_data *drv_data = host->drv_data;
int width, i, ret = 0;
u32 fifo_size;
int init_slots = 0;
if (!host->pdata) {
host->pdata = dw_mci_parse_dt(host);
if (PTR_ERR(host->pdata) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (IS_ERR(host->pdata)) {
dev_err(host->dev, "platform data not available\n");
return -EINVAL;
}
}
host->biu_clk = devm_clk_get(host->dev, "biu");
if (IS_ERR(host->biu_clk)) {
dev_dbg(host->dev, "biu clock not available\n");
} else {
ret = clk_prepare_enable(host->biu_clk);
if (ret) {
dev_err(host->dev, "failed to enable biu clock\n");
return ret;
}
}
host->ciu_clk = devm_clk_get(host->dev, "ciu");
if (IS_ERR(host->ciu_clk)) {
dev_dbg(host->dev, "ciu clock not available\n");
host->bus_hz = host->pdata->bus_hz;
} else {
ret = clk_prepare_enable(host->ciu_clk);
if (ret) {
dev_err(host->dev, "failed to enable ciu clock\n");
goto err_clk_biu;
}
if (host->pdata->bus_hz) {
ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
if (ret)
dev_warn(host->dev,
"Unable to set bus rate to %uHz\n",
host->pdata->bus_hz);
}
host->bus_hz = clk_get_rate(host->ciu_clk);
}
if (!host->bus_hz) {
dev_err(host->dev,
"Platform data must supply bus speed\n");
ret = -ENODEV;
goto err_clk_ciu;
}
if (drv_data && drv_data->init) {
ret = drv_data->init(host);
if (ret) {
dev_err(host->dev,
"implementation specific init failed\n");
goto err_clk_ciu;
}
}
if (!IS_ERR(host->pdata->rstc)) {
reset_control_assert(host->pdata->rstc);
usleep_range(10, 50);
reset_control_deassert(host->pdata->rstc);
}
setup_timer(&host->cmd11_timer,
dw_mci_cmd11_timer, (unsigned long)host);
setup_timer(&host->dto_timer,
dw_mci_dto_timer, (unsigned long)host);
spin_lock_init(&host->lock);
spin_lock_init(&host->irq_lock);
INIT_LIST_HEAD(&host->queue);
/*
* Get the host data width - this assumes that HCON has been set with
* the correct values.
*/
i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
if (!i) {
host->push_data = dw_mci_push_data16;
host->pull_data = dw_mci_pull_data16;
width = 16;
host->data_shift = 1;
} else if (i == 2) {
host->push_data = dw_mci_push_data64;
host->pull_data = dw_mci_pull_data64;
width = 64;
host->data_shift = 3;
} else {
/* Check for a reserved value, and warn if one is found */
WARN((i != 1),
"HCON reports a reserved host data width!\n"
"Defaulting to 32-bit access.\n");
host->push_data = dw_mci_push_data32;
host->pull_data = dw_mci_pull_data32;
width = 32;
host->data_shift = 2;
}
/* Reset all blocks */
if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
ret = -ENODEV;
goto err_clk_ciu;
}
host->dma_ops = host->pdata->dma_ops;
dw_mci_init_dma(host);
/* Clear the interrupts for the host controller */
mci_writel(host, RINTSTS, 0xFFFFFFFF);
mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
/* Put in max timeout */
mci_writel(host, TMOUT, 0xFFFFFFFF);
/*
* FIFO threshold settings RxMark = fifo_size / 2 - 1,
* Tx Mark = fifo_size / 2 DMA Size = 8
*/
if (!host->pdata->fifo_depth) {
/*
* Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
* have been overwritten by the bootloader, just like we're
* about to do, so if you know the value for your hardware, you
* should put it in the platform data.
*/
fifo_size = mci_readl(host, FIFOTH);
fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
} else {
fifo_size = host->pdata->fifo_depth;
}
host->fifo_depth = fifo_size;
host->fifoth_val =
SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
mci_writel(host, FIFOTH, host->fifoth_val);
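/*
 * Worked example (a sketch, assuming the usual SDMMC_SET_FIFOTH() layout
 * of MSIZE in bits [30:28], RX_WMark in [27:16] and TX_WMark in [11:0]):
 * with a 32-word FIFO this programs MSIZE = 8 (encoded as 0x2),
 * RX_WMark = 32/2 - 1 = 15 and TX_WMark = 32/2 = 16, i.e.
 * FIFOTH = (0x2 << 28) | (15 << 16) | 16 = 0x200f0010.
 */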
/* disable clock to CIU */
mci_writel(host, CLKENA, 0);
mci_writel(host, CLKSRC, 0);
/*
* The DATA register offset changed in the 2.40a spec, so check the
* version ID and select the matching data offset.
*/
host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
dev_info(host->dev, "Version ID is %04x\n", host->verid);
if (host->data_addr_override)
host->fifo_reg = host->regs + host->data_addr_override;
else if (host->verid < DW_MMC_240A)
host->fifo_reg = host->regs + DATA_OFFSET;
else
host->fifo_reg = host->regs + DATA_240A_OFFSET;
tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
host->irq_flags, "dw-mci", host);
if (ret)
goto err_dmaunmap;
if (host->pdata->num_slots)
host->num_slots = host->pdata->num_slots;
else
host->num_slots = 1;
if (host->num_slots < 1 ||
host->num_slots > SDMMC_GET_SLOT_NUM(mci_readl(host, HCON))) {
dev_err(host->dev,
"Platform data must supply correct num_slots.\n");
ret = -ENODEV;
goto err_clk_ciu;
}
/*
* Enable interrupts for command done, data over, data empty,
* receive ready and error such as transmit, receive timeout, crc error
*/
mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
SDMMC_INT_TXDR | SDMMC_INT_RXDR |
DW_MCI_ERROR_FLAGS);
/* Enable mci interrupt */
mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
dev_info(host->dev,
"DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
host->irq, width, fifo_size);
/* We need at least one slot to succeed */
for (i = 0; i < host->num_slots; i++) {
ret = dw_mci_init_slot(host, i);
if (ret)
dev_dbg(host->dev, "slot %d init failed\n", i);
else
init_slots++;
}
if (init_slots) {
dev_info(host->dev, "%d slots initialized\n", init_slots);
} else {
dev_dbg(host->dev,
"attempted to initialize %d slots, but failed on all\n",
host->num_slots);
goto err_dmaunmap;
}
/* Now that slots are all setup, we can enable card detect */
dw_mci_enable_cd(host);
return 0;
err_dmaunmap:
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
if (!IS_ERR(host->pdata->rstc))
reset_control_assert(host->pdata->rstc);
err_clk_ciu:
clk_disable_unprepare(host->ciu_clk);
err_clk_biu:
clk_disable_unprepare(host->biu_clk);
return ret;
}
EXPORT_SYMBOL(dw_mci_probe);
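/* Tear down all slots, quiesce the controller and release clocks/DMA. */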
void dw_mci_remove(struct dw_mci *host)
{
int i;
for (i = 0; i < host->num_slots; i++) {
dev_dbg(host->dev, "remove slot %d\n", i);
if (host->slot[i])
dw_mci_cleanup_slot(host->slot[i], i);
}
mci_writel(host, RINTSTS, 0xFFFFFFFF);
mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
/* disable clock to CIU */
mci_writel(host, CLKENA, 0);
mci_writel(host, CLKSRC, 0);
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
if (!IS_ERR(host->pdata->rstc))
reset_control_assert(host->pdata->rstc);
clk_disable_unprepare(host->ciu_clk);
clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
#ifdef CONFIG_PM
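/*
 * Runtime/system suspend: release DMA, gate the CIU clock and, when card
 * detect does not depend on the controller (non-removable card or CD
 * GPIO), gate the BIU clock as well.
 */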
int dw_mci_runtime_suspend(struct device *dev)
{
struct dw_mci *host = dev_get_drvdata(dev);
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
clk_disable_unprepare(host->ciu_clk);
if (host->cur_slot &&
(mmc_can_gpio_cd(host->cur_slot->mmc) ||
!mmc_card_is_removable(host->cur_slot->mmc)))
clk_disable_unprepare(host->biu_clk);
return 0;
}
EXPORT_SYMBOL(dw_mci_runtime_suspend);
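/*
 * Runtime/system resume: re-enable clocks, reset and reprogram the
 * controller (DMA, FIFOTH, TMOUT, interrupt mask), restore each slot's
 * I/O settings and re-enable card detect.
 */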
int dw_mci_runtime_resume(struct device *dev)
{
int i, ret = 0;
struct dw_mci *host = dev_get_drvdata(dev);
if (host->cur_slot &&
(mmc_can_gpio_cd(host->cur_slot->mmc) ||
!mmc_card_is_removable(host->cur_slot->mmc))) {
ret = clk_prepare_enable(host->biu_clk);
if (ret)
return ret;
}
ret = clk_prepare_enable(host->ciu_clk);
if (ret)
goto err;
if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
clk_disable_unprepare(host->ciu_clk);
ret = -ENODEV;
goto err;
}
if (host->use_dma && host->dma_ops->init)
host->dma_ops->init(host);
/*
* Restore the initial value at FIFOTH register
* And Invalidate the prev_blksz with zero
*/
mci_writel(host, FIFOTH, host->fifoth_val);
host->prev_blksz = 0;
/* Put in max timeout */
mci_writel(host, TMOUT, 0xFFFFFFFF);
mci_writel(host, RINTSTS, 0xFFFFFFFF);
mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
SDMMC_INT_TXDR | SDMMC_INT_RXDR |
DW_MCI_ERROR_FLAGS);
mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
if (!slot)
continue;
if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
/* Force setup bus to guarantee available clock output */
dw_mci_setup_bus(slot, true);
}
/* Now that slots are all setup, we can enable card detect */
dw_mci_enable_cd(host);
return 0;
err:
if (host->cur_slot &&
(mmc_can_gpio_cd(host->cur_slot->mmc) ||
!mmc_card_is_removable(host->cur_slot->mmc)))
clk_disable_unprepare(host->biu_clk);
return ret;
}
EXPORT_SYMBOL(dw_mci_runtime_resume);
#endif /* CONFIG_PM */
static int __init dw_mci_init(void)
{
pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
return 0;
}
static void __exit dw_mci_exit(void)
{
}
module_init(dw_mci_init);
module_exit(dw_mci_exit);
MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");