/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "mmc_ops.h"

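/*
 * Issue CMD7 (SELECT/DESELECT_CARD): select the card addressed by its RCA,
 * or deselect all cards when no card is given (RCA argument 0).
 */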
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
        int err;
        struct mmc_command cmd;

        BUG_ON(!host);

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_SELECT_CARD;

        if (card) {
                cmd.arg = card->rca << 16;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
        } else {
                cmd.arg = 0;
                cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
        }

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        return 0;
}

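/* Public wrappers: select a specific card, or deselect all cards on the bus. */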
int mmc_select_card(struct mmc_card *card)
{
        BUG_ON(!card);

        return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
        return _mmc_select_card(host, NULL);
}

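/*
 * Issue CMD0 (GO_IDLE_STATE) to reset the card to idle state.  In SPI mode
 * the card comes back up with CRC checking disabled, so the host's
 * use_spi_crc flag is cleared to match; it can be re-enabled later via
 * mmc_spi_set_crc().
 */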
int mmc_go_idle(struct mmc_host *host)
{
        int err;
        struct mmc_command cmd;

        /*
         * Non-SPI hosts need to prevent chipselect going active during
         * GO_IDLE; that would put chips into SPI mode.  Remind them of
         * that in case of hardware that won't pull up DAT3/nCS otherwise.
         *
         * SPI hosts ignore ios.chip_select; it's managed according to
         * rules that must accommodate non-MMC slaves which this layer
         * won't even know about.
         */
        if (!mmc_host_is_spi(host)) {
                mmc_set_chip_select(host, MMC_CS_HIGH);
                mmc_delay(1);
        }

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_GO_IDLE_STATE;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

        err = mmc_wait_for_cmd(host, &cmd, 0);

        mmc_delay(1);

        if (!mmc_host_is_spi(host)) {
                mmc_set_chip_select(host, MMC_CS_DONTCARE);
                mmc_delay(1);
        }

        host->use_spi_crc = 0;

        return err;
}

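/*
 * Issue CMD1 (SEND_OP_COND) and, unless just probing (ocr == 0), poll it for
 * up to ~1 second (100 tries, 10 ms apart) until the card reports that its
 * power-up/reset sequence has completed.
 */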
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
        struct mmc_command cmd;
        int i, err = 0;

        BUG_ON(!host);

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_SEND_OP_COND;
        cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

        for (i = 100; i; i--) {
                err = mmc_wait_for_cmd(host, &cmd, 0);
                if (err)
                        break;

                /* if we're just probing, do a single pass */
                if (ocr == 0)
                        break;

                /* otherwise wait until reset completes */
                if (mmc_host_is_spi(host)) {
                        if (!(cmd.resp[0] & R1_SPI_IDLE))
                                break;
                } else {
                        if (cmd.resp[0] & MMC_CARD_BUSY)
                                break;
                }

                err = -ETIMEDOUT;

                mmc_delay(10);
        }

        if (rocr && !mmc_host_is_spi(host))
                *rocr = cmd.resp[0];

        return err;
}

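/*
 * Issue CMD2 (ALL_SEND_CID): a broadcast command with a 136-bit R2 response
 * carrying the card identification (CID) register.
 */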
int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
        int err;
        struct mmc_command cmd;

        BUG_ON(!host);
        BUG_ON(!cid);

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_ALL_SEND_CID;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        memcpy(cid, cmd.resp, sizeof(u32) * 4);

        return 0;
}

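/*
 * Issue CMD3 (SET_RELATIVE_ADDR): assign the host-chosen relative card
 * address (RCA) that later addressed commands will use.
 */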
int mmc_set_relative_addr(struct mmc_card *card)
{
        int err;
        struct mmc_command cmd;

        BUG_ON(!card);
        BUG_ON(!card->host);

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_SET_RELATIVE_ADDR;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        return 0;
}

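/*
 * Read a 128-bit register (CSD or CID) using a native, non-SPI command with
 * an R2 response; the register contents arrive in the response itself.
 */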
static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
        int err;
        struct mmc_command cmd;

        BUG_ON(!host);
        BUG_ON(!cxd);

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = opcode;
        cmd.arg = arg;
        cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        memcpy(cxd, cmd.resp, sizeof(u32) * 4);

        return 0;
}

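/*
 * Read a CSD/CID/EXT_CSD-style register as a data block, as used in SPI mode
 * and for EXT_CSD, bouncing through a kmalloc'd buffer so the DMA target is
 * never on the caller's stack.
 */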
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
                u32 opcode, void *buf, unsigned len)
{
        struct mmc_request mrq;
        struct mmc_command cmd;
        struct mmc_data data;
        struct scatterlist sg;
        void *data_buf;

        /* dma onto stack is unsafe/nonportable, but callers to this
         * routine normally provide temporary on-stack buffers ...
         */
        data_buf = kmalloc(len, GFP_KERNEL);
        if (data_buf == NULL)
                return -ENOMEM;

        memset(&mrq, 0, sizeof(struct mmc_request));
        memset(&cmd, 0, sizeof(struct mmc_command));
        memset(&data, 0, sizeof(struct mmc_data));

        mrq.cmd = &cmd;
        mrq.data = &data;

        cmd.opcode = opcode;
        cmd.arg = 0;

        /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
         * rely on callers to never use this with "native" calls for reading
         * CSD or CID.  Native versions of those commands use the R2 type,
         * not R1 plus a data block.
         */
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = len;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        sg_init_one(&sg, data_buf, len);

        /*
         * The spec states that CSD and CID accesses have a timeout
         * of 64 clock cycles.
         */
        data.timeout_ns = 0;
        data.timeout_clks = 64;

        mmc_wait_for_req(host, &mrq);

        memcpy(buf, data_buf, len);
        kfree(data_buf);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        return 0;
}

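/*
 * Fetch the CSD: via CMD9 (SEND_CSD) on native hosts, or as a 16-byte data
 * block on SPI hosts, converting the words from big-endian on the SPI path.
 */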
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
        int ret, i;

        if (!mmc_host_is_spi(card->host))
                return mmc_send_cxd_native(card->host, card->rca << 16,
                                csd, MMC_SEND_CSD);

        ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16);
        if (ret)
                return ret;

        for (i = 0; i < 4; i++)
                csd[i] = be32_to_cpu(csd[i]);

        return 0;
}

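/*
 * Fetch the CID: via CMD10 (SEND_CID) addressed to the attached card on
 * native hosts, or as a 16-byte data block on SPI hosts.
 */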
int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
        int ret, i;

        if (!mmc_host_is_spi(host)) {
                if (!host->card)
                        return -EINVAL;
                return mmc_send_cxd_native(host, host->card->rca << 16,
                                cid, MMC_SEND_CID);
        }

        ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16);
        if (ret)
                return ret;

        for (i = 0; i < 4; i++)
                cid[i] = be32_to_cpu(cid[i]);

        return 0;
}

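/* Fetch the 512-byte EXT_CSD register, which is always read as a data block. */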
int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
        return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
                        ext_csd, 512);
}

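/*
 * SPI only: read the OCR with CMD58 (READ_OCR); the highcap flag sets bit 30
 * of the argument for hosts that support high-capacity cards.
 */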
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
        struct mmc_command cmd;
        int err;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_SPI_READ_OCR;
        cmd.arg = highcap ? (1 << 30) : 0;
        cmd.flags = MMC_RSP_SPI_R3;

        err = mmc_wait_for_cmd(host, &cmd, 0);

        *ocrp = cmd.resp[1];
        return err;
}

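/*
 * SPI only: toggle CRC checking with CMD59 (CRC_ON_OFF) and record the new
 * setting in host->use_spi_crc on success.
 */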
int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
        struct mmc_command cmd;
        int err;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_SPI_CRC_ON_OFF;
        cmd.flags = MMC_RSP_SPI_R1;
        cmd.arg = use_crc;

        err = mmc_wait_for_cmd(host, &cmd, 0);
        if (!err)
                host->use_spi_crc = use_crc;
        return err;
}

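/*
 * Issue CMD6 (SWITCH) in write-byte mode to modify a single EXT_CSD byte;
 * the argument packs the access mode, byte index, value and command set.
 */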
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value)
{
        int err;
        struct mmc_command cmd;

        BUG_ON(!card);
        BUG_ON(!card->host);

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_SWITCH;
        cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
                  (index << 16) |
                  (value << 8) |
                  set;
        cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        return 0;
}

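/*
 * Issue CMD13 (SEND_STATUS) and hand back the raw status word; see the note
 * below about native vs. SPI status formats.
 */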
int mmc_send_status(struct mmc_card *card, u32 *status)
{
        int err;
        struct mmc_command cmd;

        BUG_ON(!card);
        BUG_ON(!card->host);

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        /* NOTE: callers are required to understand the difference
         * between "native" and SPI format status words!
         */
        if (status)
                *status = cmd.resp[0];

        return 0;
}