Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc:
  sdhci: highmem capable PIO routines
  sg: reimplement sg mapping iterator
  mmc_test: print message when attaching to card
  mmc: Remove Russell as primecell mci maintainer
  mmc_block: bounce buffer highmem support
  sdhci: fix bad warning from commit c8b3e02
  sdhci: add warnings for bad buffers in ADMA path
  mmc_test: test oversized sg lists
  mmc_test: highmem tests
  s3cmci: ensure host stopped on machine shutdown
  au1xmmc: suspend/resume implementation
  s3cmci: fixes for section mismatch warnings
  pxamci: trivial fix of DMA alignment register bit clearing
This commit is contained in:
Linus Torvalds 2008-07-23 12:04:34 -07:00
commit 20b7997e8a
10 changed files with 567 additions and 242 deletions

View File

@ -441,10 +441,7 @@ M: spyro@f2s.com
S: Maintained
ARM PRIMECELL MMCI PL180/1 DRIVER
P: Russell King
M: rmk@arm.linux.org.uk
L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
S: Maintained
S: Orphan
ARM/ADI ROADRUNNER MACHINE SUPPORT
P: Lennert Buytenhek

View File

@ -21,13 +21,17 @@
#define RESULT_UNSUP_HOST 2
#define RESULT_UNSUP_CARD 3
#define BUFFER_SIZE (PAGE_SIZE * 4)
#define BUFFER_ORDER 2
#define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
struct mmc_test_card {
struct mmc_card *card;
u8 scratch[BUFFER_SIZE];
u8 *buffer;
#ifdef CONFIG_HIGHMEM
struct page *highmem;
#endif
};
/*******************************************************************/
@ -384,14 +388,16 @@ static int mmc_test_transfer(struct mmc_test_card *test,
int ret, i;
unsigned long flags;
BUG_ON(blocks * blksz > BUFFER_SIZE);
if (write) {
for (i = 0;i < blocks * blksz;i++)
test->scratch[i] = i;
} else {
memset(test->scratch, 0, BUFFER_SIZE);
memset(test->scratch, 0, blocks * blksz);
}
local_irq_save(flags);
sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
sg_copy_from_buffer(sg, sg_len, test->scratch, blocks * blksz);
local_irq_restore(flags);
ret = mmc_test_set_blksize(test, blksz);
@ -438,7 +444,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
}
} else {
local_irq_save(flags);
sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
sg_copy_to_buffer(sg, sg_len, test->scratch, blocks * blksz);
local_irq_restore(flags);
for (i = 0;i < blocks * blksz;i++) {
if (test->scratch[i] != (u8)i)
@ -799,6 +805,157 @@ static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
return 0;
}
/*
 * Write test using a scatterlist entry that is larger than the actual
 * transfer: the sg entry covers all of BUFFER_SIZE while only `size'
 * bytes are requested. Checks that the host driver honours the request
 * size rather than the (over-sized) sg list.
 */
static int mmc_test_bigsg_write(struct mmc_test_card *test)
{
int ret;
unsigned int size;
struct scatterlist sg;
/* Needs multi-block transfers; single-block hosts can't run this. */
if (test->card->host->max_blk_count == 1)
return RESULT_UNSUP_HOST;
/* Clamp the transfer size to every relevant host limit. */
size = PAGE_SIZE * 2;
size = min(size, test->card->host->max_req_size);
size = min(size, test->card->host->max_seg_size);
size = min(size, test->card->host->max_blk_count * 512);
memset(test->buffer, 0, BUFFER_SIZE);
/* Need at least two 512-byte blocks for a meaningful multi-block test. */
if (size < 1024)
return RESULT_UNSUP_HOST;
sg_init_table(&sg, 1);
/* Deliberately map the whole buffer, not just `size' bytes. */
sg_init_one(&sg, test->buffer, BUFFER_SIZE);
ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
if (ret)
return ret;
return 0;
}
/*
 * Read test using a scatterlist entry larger than the actual transfer.
 * The buffer beyond the requested size is pre-filled with a sentinel
 * (0xCD) and verified afterwards to detect read overruns by the host.
 */
static int mmc_test_bigsg_read(struct mmc_test_card *test)
{
int ret, i;
unsigned int size;
struct scatterlist sg;
/* Needs multi-block transfers; single-block hosts can't run this. */
if (test->card->host->max_blk_count == 1)
return RESULT_UNSUP_HOST;
/* Clamp the transfer size to every relevant host limit. */
size = PAGE_SIZE * 2;
size = min(size, test->card->host->max_req_size);
size = min(size, test->card->host->max_seg_size);
size = min(size, test->card->host->max_blk_count * 512);
/* Need at least two 512-byte blocks for a meaningful multi-block test. */
if (size < 1024)
return RESULT_UNSUP_HOST;
/* Sentinel pattern; any byte past `size' must still be 0xCD afterwards. */
memset(test->buffer, 0xCD, BUFFER_SIZE);
sg_init_table(&sg, 1);
/* Deliberately map the whole buffer, not just `size' bytes. */
sg_init_one(&sg, test->buffer, BUFFER_SIZE);
ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
if (ret)
return ret;
/* mmc_test_transfer() doesn't check for read overflows */
for (i = size;i < BUFFER_SIZE;i++) {
if (test->buffer[i] != 0xCD)
return RESULT_FAIL;
}
return 0;
}
#ifdef CONFIG_HIGHMEM
/*
 * Single-block (512 byte) write sourced from a highmem page, to
 * exercise host drivers that must kmap PIO/bounce buffers.
 */
static int mmc_test_write_high(struct mmc_test_card *test)
{
int ret;
struct scatterlist sg;
sg_init_table(&sg, 1);
/* sg points at the highmem page directly; no kernel mapping exists yet. */
sg_set_page(&sg, test->highmem, 512, 0);
ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
if (ret)
return ret;
return 0;
}
/*
 * Single-block (512 byte) read into a highmem page, to exercise host
 * drivers that must kmap PIO/bounce buffers.
 */
static int mmc_test_read_high(struct mmc_test_card *test)
{
int ret;
struct scatterlist sg;
sg_init_table(&sg, 1);
/* sg points at the highmem page directly; no kernel mapping exists yet. */
sg_set_page(&sg, test->highmem, 512, 0);
ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
if (ret)
return ret;
return 0;
}
/*
 * Multi-block write sourced from a highmem page. Transfer size is
 * clamped to the host's limits; requires at least two 512-byte blocks.
 */
static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
int ret;
unsigned int size;
struct scatterlist sg;
/* Needs multi-block transfers; single-block hosts can't run this. */
if (test->card->host->max_blk_count == 1)
return RESULT_UNSUP_HOST;
/* Clamp the transfer size to every relevant host limit. */
size = PAGE_SIZE * 2;
size = min(size, test->card->host->max_req_size);
size = min(size, test->card->host->max_seg_size);
size = min(size, test->card->host->max_blk_count * 512);
if (size < 1024)
return RESULT_UNSUP_HOST;
sg_init_table(&sg, 1);
/* sg references the highmem page; host must map it itself. */
sg_set_page(&sg, test->highmem, size, 0);
ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
if (ret)
return ret;
return 0;
}
/*
 * Multi-block read into a highmem page. Transfer size is clamped to
 * the host's limits; requires at least two 512-byte blocks.
 */
static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
int ret;
unsigned int size;
struct scatterlist sg;
/* Needs multi-block transfers; single-block hosts can't run this. */
if (test->card->host->max_blk_count == 1)
return RESULT_UNSUP_HOST;
/* Clamp the transfer size to every relevant host limit. */
size = PAGE_SIZE * 2;
size = min(size, test->card->host->max_req_size);
size = min(size, test->card->host->max_seg_size);
size = min(size, test->card->host->max_blk_count * 512);
if (size < 1024)
return RESULT_UNSUP_HOST;
sg_init_table(&sg, 1);
/* sg references the highmem page; host must map it itself. */
sg_set_page(&sg, test->highmem, size, 0);
ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
if (ret)
return ret;
return 0;
}
#endif /* CONFIG_HIGHMEM */
static const struct mmc_test_case mmc_test_cases[] = {
{
.name = "Basic write (no data verification)",
@ -913,6 +1070,53 @@ static const struct mmc_test_case mmc_test_cases[] = {
.name = "Correct xfer_size at read (midway failure)",
.run = mmc_test_multi_xfersize_read,
},
{
.name = "Over-sized SG list write",
.prepare = mmc_test_prepare_write,
.run = mmc_test_bigsg_write,
.cleanup = mmc_test_cleanup,
},
{
.name = "Over-sized SG list read",
.prepare = mmc_test_prepare_read,
.run = mmc_test_bigsg_read,
.cleanup = mmc_test_cleanup,
},
#ifdef CONFIG_HIGHMEM
{
.name = "Highmem write",
.prepare = mmc_test_prepare_write,
.run = mmc_test_write_high,
.cleanup = mmc_test_cleanup,
},
{
.name = "Highmem read",
.prepare = mmc_test_prepare_read,
.run = mmc_test_read_high,
.cleanup = mmc_test_cleanup,
},
{
.name = "Multi-block highmem write",
.prepare = mmc_test_prepare_write,
.run = mmc_test_multi_write_high,
.cleanup = mmc_test_cleanup,
},
{
.name = "Multi-block highmem read",
.prepare = mmc_test_prepare_read,
.run = mmc_test_multi_read_high,
.cleanup = mmc_test_cleanup,
},
#endif /* CONFIG_HIGHMEM */
};
static struct mutex mmc_test_lock;
@ -1014,12 +1218,23 @@ static ssize_t mmc_test_store(struct device *dev,
test->card = card;
test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif
#ifdef CONFIG_HIGHMEM
if (test->buffer && test->highmem) {
#else
if (test->buffer) {
#endif
mutex_lock(&mmc_test_lock);
mmc_test_run(test, testcase);
mutex_unlock(&mmc_test_lock);
}
#ifdef CONFIG_HIGHMEM
__free_pages(test->highmem, BUFFER_ORDER);
#endif
kfree(test->buffer);
kfree(test);
@ -1041,6 +1256,8 @@ static int mmc_test_probe(struct mmc_card *card)
if (ret)
return ret;
dev_info(&card->dev, "Card claimed for testing.\n");
return 0;
}

View File

@ -148,7 +148,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
printk(KERN_WARNING "%s: unable to allocate "
"bounce buffer\n", mmc_card_name(card));
} else {
blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
blk_queue_max_sectors(mq->queue, bouncesz / 512);
blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
@ -290,55 +290,15 @@ void mmc_queue_resume(struct mmc_queue *mq)
}
}
/*
 * Copy data between two scatterlists, chunk by chunk, advancing
 * whichever side is exhausted first. Source length drives the loop;
 * running out of destination entries first is a bug (BUG_ON).
 *
 * NOTE(review): uses sg_virt(), which requires the sg pages to have a
 * permanent kernel mapping — i.e. no highmem support. This is the
 * helper the bounce-buffer highmem rework replaces with
 * sg_copy_to_buffer()/sg_copy_from_buffer().
 */
static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
struct scatterlist *src, unsigned int src_len)
{
unsigned int chunk;
char *dst_buf, *src_buf;
unsigned int dst_size, src_size;
dst_buf = NULL;
src_buf = NULL;
dst_size = 0;
src_size = 0;
while (src_len) {
BUG_ON(dst_len == 0);
/* Refill the destination window when the previous entry is done. */
if (dst_size == 0) {
dst_buf = sg_virt(dst);
dst_size = dst->length;
}
/* Likewise for the source window. */
if (src_size == 0) {
src_buf = sg_virt(src);
src_size = src->length;
}
/* Copy as much as both current entries allow. */
chunk = min(dst_size, src_size);
memcpy(dst_buf, src_buf, chunk);
dst_buf += chunk;
src_buf += chunk;
dst_size -= chunk;
src_size -= chunk;
if (dst_size == 0) {
dst++;
dst_len--;
}
if (src_size == 0) {
src++;
src_len--;
}
}
}
/*
* Prepare the sg list(s) to be handed of to the host driver
*/
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
unsigned int sg_len;
size_t buflen;
struct scatterlist *sg;
int i;
if (!mq->bounce_buf)
return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
@ -349,47 +309,52 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
mq->bounce_sg_len = sg_len;
/*
* Shortcut in the event we only get a single entry.
*/
if (sg_len == 1) {
memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
return 1;
}
buflen = 0;
for_each_sg(mq->bounce_sg, sg, sg_len, i)
buflen += sg->length;
sg_init_one(mq->sg, mq->bounce_buf, 0);
while (sg_len) {
mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
sg_len--;
}
sg_init_one(mq->sg, mq->bounce_buf, buflen);
return 1;
}
/*
* If writing, bounce the data to the buffer before the request
* is sent to the host driver
*/
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
unsigned long flags;
if (!mq->bounce_buf)
return;
if (mq->bounce_sg_len == 1)
return;
if (rq_data_dir(mq->req) != WRITE)
return;
copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
local_irq_save(flags);
sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
mq->bounce_buf, mq->sg[0].length);
local_irq_restore(flags);
}
/*
* If reading, bounce the data from the buffer after the request
* has been handled by the host driver
*/
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
unsigned long flags;
if (!mq->bounce_buf)
return;
if (mq->bounce_sg_len == 1)
return;
if (rq_data_dir(mq->req) != READ)
return;
copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
local_irq_save(flags);
sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
mq->bounce_buf, mq->sg[0].length);
local_irq_restore(flags);
}

View File

@ -1043,7 +1043,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
goto out6;
}
platform_set_drvdata(pdev, mmc);
platform_set_drvdata(pdev, host);
printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X"
" (mode=%s)\n", pdev->id, host->iobase,
@ -1087,13 +1087,10 @@ out0:
static int __devexit au1xmmc_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct au1xmmc_host *host;
struct au1xmmc_host *host = platform_get_drvdata(pdev);
if (mmc) {
host = mmc_priv(mmc);
mmc_remove_host(mmc);
if (host) {
mmc_remove_host(host->mmc);
#ifdef CONFIG_LEDS_CLASS
if (host->platdata && host->platdata->led)
@ -1101,8 +1098,8 @@ static int __devexit au1xmmc_remove(struct platform_device *pdev)
#endif
if (host->platdata && host->platdata->cd_setup &&
!(mmc->caps & MMC_CAP_NEEDS_POLL))
host->platdata->cd_setup(mmc, 0);
!(host->mmc->caps & MMC_CAP_NEEDS_POLL))
host->platdata->cd_setup(host->mmc, 0);
au_writel(0, HOST_ENABLE(host));
au_writel(0, HOST_CONFIG(host));
@ -1122,16 +1119,49 @@ static int __devexit au1xmmc_remove(struct platform_device *pdev)
release_resource(host->ioarea);
kfree(host->ioarea);
mmc_free_host(mmc);
mmc_free_host(host->mmc);
platform_set_drvdata(pdev, NULL);
}
return 0;
}
#ifdef CONFIG_PM
/*
 * Platform suspend hook: suspend the MMC core first, then quiesce the
 * controller by clearing its config registers, acknowledging all
 * status bits and disabling the host block.
 */
static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
{
struct au1xmmc_host *host = platform_get_drvdata(pdev);
int ret;
/* Let the MMC core wind down outstanding requests first. */
ret = mmc_suspend_host(host->mmc, state);
if (ret)
return ret;
au_writel(0, HOST_CONFIG2(host));
au_writel(0, HOST_CONFIG(host));
/* Write-1-to-clear every pending status bit. */
au_writel(0xffffffff, HOST_STATUS(host));
au_writel(0, HOST_ENABLE(host));
au_sync();
return 0;
}
/*
 * Platform resume hook: re-initialise the controller from scratch
 * (suspend cleared all of its state), then resume the MMC core.
 */
static int au1xmmc_resume(struct platform_device *pdev)
{
struct au1xmmc_host *host = platform_get_drvdata(pdev);
au1xmmc_reset_controller(host);
return mmc_resume_host(host->mmc);
}
#else
#define au1xmmc_suspend NULL
#define au1xmmc_resume NULL
#endif
static struct platform_driver au1xmmc_driver = {
.probe = au1xmmc_probe,
.remove = au1xmmc_remove,
.suspend = NULL,
.resume = NULL,
.suspend = au1xmmc_suspend,
.resume = au1xmmc_resume,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,

View File

@ -177,7 +177,7 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
if (dalgn)
DALGN |= (1 << host->dma);
else
DALGN &= (1 << host->dma);
DALGN &= ~(1 << host->dma);
DDADR(host->dma) = host->sg_dma;
DCSR(host->dma) = DCSR_RUN;
}

View File

@ -1331,21 +1331,30 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
return ret;
}
/*
 * Platform shutdown hook: make sure the host is fully stopped on
 * machine shutdown — release the card-detect IRQ, detach from the MMC
 * core and gate the controller clock.
 */
static void s3cmci_shutdown(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct s3cmci_host *host = mmc_priv(mmc);
/* irq_cd < 0 means no card-detect IRQ was ever requested. */
if (host->irq_cd >= 0)
free_irq(host->irq_cd, host);
mmc_remove_host(mmc);
clk_disable(host->clk);
}
static int __devexit s3cmci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct s3cmci_host *host = mmc_priv(mmc);
mmc_remove_host(mmc);
s3cmci_shutdown(pdev);
clk_disable(host->clk);
clk_put(host->clk);
tasklet_disable(&host->pio_tasklet);
s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client);
if (host->irq_cd >= 0)
free_irq(host->irq_cd, host);
free_irq(host->irq, host);
iounmap(host->base);
@ -1355,17 +1364,17 @@ static int __devexit s3cmci_remove(struct platform_device *pdev)
return 0;
}
static int __devinit s3cmci_probe_2410(struct platform_device *dev)
static int __devinit s3cmci_2410_probe(struct platform_device *dev)
{
return s3cmci_probe(dev, 0);
}
static int __devinit s3cmci_probe_2412(struct platform_device *dev)
static int __devinit s3cmci_2412_probe(struct platform_device *dev)
{
return s3cmci_probe(dev, 1);
}
static int __devinit s3cmci_probe_2440(struct platform_device *dev)
static int __devinit s3cmci_2440_probe(struct platform_device *dev)
{
return s3cmci_probe(dev, 1);
}
@ -1392,29 +1401,32 @@ static int s3cmci_resume(struct platform_device *dev)
#endif /* CONFIG_PM */
static struct platform_driver s3cmci_driver_2410 = {
static struct platform_driver s3cmci_2410_driver = {
.driver.name = "s3c2410-sdi",
.driver.owner = THIS_MODULE,
.probe = s3cmci_probe_2410,
.probe = s3cmci_2410_probe,
.remove = __devexit_p(s3cmci_remove),
.shutdown = s3cmci_shutdown,
.suspend = s3cmci_suspend,
.resume = s3cmci_resume,
};
static struct platform_driver s3cmci_driver_2412 = {
static struct platform_driver s3cmci_2412_driver = {
.driver.name = "s3c2412-sdi",
.driver.owner = THIS_MODULE,
.probe = s3cmci_probe_2412,
.probe = s3cmci_2412_probe,
.remove = __devexit_p(s3cmci_remove),
.shutdown = s3cmci_shutdown,
.suspend = s3cmci_suspend,
.resume = s3cmci_resume,
};
static struct platform_driver s3cmci_driver_2440 = {
static struct platform_driver s3cmci_2440_driver = {
.driver.name = "s3c2440-sdi",
.driver.owner = THIS_MODULE,
.probe = s3cmci_probe_2440,
.probe = s3cmci_2440_probe,
.remove = __devexit_p(s3cmci_remove),
.shutdown = s3cmci_shutdown,
.suspend = s3cmci_suspend,
.resume = s3cmci_resume,
};
@ -1422,17 +1434,17 @@ static struct platform_driver s3cmci_driver_2440 = {
static int __init s3cmci_init(void)
{
platform_driver_register(&s3cmci_driver_2410);
platform_driver_register(&s3cmci_driver_2412);
platform_driver_register(&s3cmci_driver_2440);
platform_driver_register(&s3cmci_2410_driver);
platform_driver_register(&s3cmci_2412_driver);
platform_driver_register(&s3cmci_2440_driver);
return 0;
}
static void __exit s3cmci_exit(void)
{
platform_driver_unregister(&s3cmci_driver_2410);
platform_driver_unregister(&s3cmci_driver_2412);
platform_driver_unregister(&s3cmci_driver_2440);
platform_driver_unregister(&s3cmci_2410_driver);
platform_driver_unregister(&s3cmci_2412_driver);
platform_driver_unregister(&s3cmci_2440_driver);
}
module_init(s3cmci_init);

View File

@ -173,119 +173,95 @@ static void sdhci_led_control(struct led_classdev *led,
* *
\*****************************************************************************/
static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
{
return sg_virt(host->cur_sg);
}
static inline int sdhci_next_sg(struct sdhci_host* host)
{
/*
* Skip to next SG entry.
*/
host->cur_sg++;
host->num_sg--;
/*
* Any entries left?
*/
if (host->num_sg > 0) {
host->offset = 0;
host->remain = host->cur_sg->length;
}
return host->num_sg;
}
static void sdhci_read_block_pio(struct sdhci_host *host)
{
int blksize, chunk_remain;
u32 data;
char *buffer;
int size;
unsigned long flags;
size_t blksize, len, chunk;
u32 scratch;
u8 *buf;
DBG("PIO reading\n");
blksize = host->data->blksz;
chunk_remain = 0;
data = 0;
chunk = 0;
buffer = sdhci_sg_to_buffer(host) + host->offset;
local_irq_save(flags);
while (blksize) {
if (chunk_remain == 0) {
data = readl(host->ioaddr + SDHCI_BUFFER);
chunk_remain = min(blksize, 4);
}
if (!sg_miter_next(&host->sg_miter))
BUG();
size = min(host->remain, chunk_remain);
len = min(host->sg_miter.length, blksize);
chunk_remain -= size;
blksize -= size;
host->offset += size;
host->remain -= size;
blksize -= len;
host->sg_miter.consumed = len;
while (size) {
*buffer = data & 0xFF;
buffer++;
data >>= 8;
size--;
}
buf = host->sg_miter.addr;
if (host->remain == 0) {
if (sdhci_next_sg(host) == 0) {
BUG_ON(blksize != 0);
return;
while (len) {
if (chunk == 0) {
scratch = readl(host->ioaddr + SDHCI_BUFFER);
chunk = 4;
}
buffer = sdhci_sg_to_buffer(host);
*buf = scratch & 0xFF;
buf++;
scratch >>= 8;
chunk--;
len--;
}
}
sg_miter_stop(&host->sg_miter);
local_irq_restore(flags);
}
static void sdhci_write_block_pio(struct sdhci_host *host)
{
int blksize, chunk_remain;
u32 data;
char *buffer;
int bytes, size;
unsigned long flags;
size_t blksize, len, chunk;
u32 scratch;
u8 *buf;
DBG("PIO writing\n");
blksize = host->data->blksz;
chunk_remain = 4;
data = 0;
chunk = 0;
scratch = 0;
bytes = 0;
buffer = sdhci_sg_to_buffer(host) + host->offset;
local_irq_save(flags);
while (blksize) {
size = min(host->remain, chunk_remain);
if (!sg_miter_next(&host->sg_miter))
BUG();
chunk_remain -= size;
blksize -= size;
host->offset += size;
host->remain -= size;
len = min(host->sg_miter.length, blksize);
while (size) {
data >>= 8;
data |= (u32)*buffer << 24;
buffer++;
size--;
}
blksize -= len;
host->sg_miter.consumed = len;
if (chunk_remain == 0) {
writel(data, host->ioaddr + SDHCI_BUFFER);
chunk_remain = min(blksize, 4);
}
buf = host->sg_miter.addr;
if (host->remain == 0) {
if (sdhci_next_sg(host) == 0) {
BUG_ON(blksize != 0);
return;
while (len) {
scratch |= (u32)*buf << (chunk * 8);
buf++;
chunk++;
len--;
if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
writel(scratch, host->ioaddr + SDHCI_BUFFER);
chunk = 0;
scratch = 0;
}
buffer = sdhci_sg_to_buffer(host);
}
}
sg_miter_stop(&host->sg_miter);
local_irq_restore(flags);
}
static void sdhci_transfer_pio(struct sdhci_host *host)
@ -294,7 +270,7 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
BUG_ON(!host->data);
if (host->num_sg == 0)
if (host->blocks == 0)
return;
if (host->data->flags & MMC_DATA_READ)
@ -308,7 +284,8 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
else
sdhci_write_block_pio(host);
if (host->num_sg == 0)
host->blocks--;
if (host->blocks == 0)
break;
}
@ -389,6 +366,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
if (offset) {
if (data->flags & MMC_DATA_WRITE) {
buffer = sdhci_kmap_atomic(sg, &flags);
WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
memcpy(align, buffer, offset);
sdhci_kunmap_atomic(buffer, &flags);
}
@ -510,6 +488,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
size = 4 - (sg_dma_address(sg) & 0x3);
buffer = sdhci_kmap_atomic(sg, &flags);
WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
memcpy(buffer, align, size);
sdhci_kunmap_atomic(buffer, &flags);
@ -687,7 +666,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
WARN_ON(1);
host->flags &= ~SDHCI_USE_DMA;
} else {
WARN_ON(count != 1);
WARN_ON(sg_cnt != 1);
writel(sg_dma_address(data->sg),
host->ioaddr + SDHCI_DMA_ADDRESS);
}
@ -711,11 +690,9 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
}
if (!(host->flags & SDHCI_REQ_USE_DMA)) {
host->cur_sg = data->sg;
host->num_sg = data->sg_len;
host->offset = 0;
host->remain = host->cur_sg->length;
sg_miter_start(&host->sg_miter,
data->sg, data->sg_len, SG_MITER_ATOMIC);
host->blocks = data->blocks;
}
/* We do not handle DMA boundaries, so set it to max (512 KiB) */
@ -1581,9 +1558,15 @@ int sdhci_add_host(struct sdhci_host *host)
}
}
/* XXX: Hack to get MMC layer to avoid highmem */
if (!(host->flags & SDHCI_USE_DMA))
mmc_dev(host->mmc)->dma_mask = NULL;
/*
* If we use DMA, then it's up to the caller to set the DMA
* mask, but PIO does not need the hw shim so we set a new
* mask here in that case.
*/
if (!(host->flags & SDHCI_USE_DMA)) {
host->dma_mask = DMA_BIT_MASK(64);
mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
}
host->max_clk =
(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;

View File

@ -212,6 +212,7 @@ struct sdhci_host {
/* Internal data */
struct mmc_host *mmc; /* MMC structure */
u64 dma_mask; /* custom DMA mask */
#ifdef CONFIG_LEDS_CLASS
struct led_classdev led; /* LED control */
@ -238,10 +239,8 @@ struct sdhci_host {
struct mmc_data *data; /* Current data request */
unsigned int data_early:1; /* Data finished before cmd */
struct scatterlist *cur_sg; /* We're working on this */
int num_sg; /* Entries left */
int offset; /* Offset into current sg */
int remain; /* Bytes left in current */
struct sg_mapping_iter sg_miter; /* SG state for PIO */
unsigned int blocks; /* remaining PIO blocks */
int sg_count; /* Mapped sg entries */

View File

@ -224,4 +224,42 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
*/
#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
/*
* Mapping sg iterator
*
* Iterates over sg entries mapping page-by-page. On each successful
* iteration, @miter->page points to the mapped page and
* @miter->length bytes of data can be accessed at @miter->addr. As
* long as an iteration is enclosed between start and stop, the user
* is free to choose control structure and when to stop.
*
* @miter->consumed is set to @miter->length on each iteration. It
* can be adjusted if the user can't consume all the bytes in one go.
* Also, a stopped iteration can be resumed by calling next on it.
* This is useful when iteration needs to release all resources and
* continue later (e.g. at the next interrupt).
*/
#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
struct sg_mapping_iter {
/* the following three fields can be accessed directly */
struct page *page; /* currently mapped page */
void *addr; /* pointer to the mapped area */
size_t length; /* length of the mapped area */
size_t consumed; /* number of consumed bytes */
/* these are internal states, keep away */
struct scatterlist *__sg; /* current entry */
unsigned int __nents; /* nr of remaining entries */
unsigned int __offset; /* offset within sg */
unsigned int __flags;
};
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
unsigned int nents, unsigned int flags);
bool sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);
#endif /* _LINUX_SCATTERLIST_H */

View File

@ -294,6 +294,117 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
}
EXPORT_SYMBOL(sg_alloc_table);
/**
* sg_miter_start - start mapping iteration over a sg list
* @miter: sg mapping iter to be started
* @sgl: sg list to iterate over
* @nents: number of sg entries
* @flags: sg iterator flags (e.g. SG_MITER_ATOMIC)
*
* Description:
* Starts mapping iterator @miter.
*
* Context:
* Don't care.
*/
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
unsigned int nents, unsigned int flags)
{
/* Zero the public fields (page/addr/length/consumed) as well as the
 * internal state; sg_miter_stop() relies on ->addr being NULL when
 * nothing is mapped. */
memset(miter, 0, sizeof(struct sg_mapping_iter));
miter->__sg = sgl;
miter->__nents = nents;
miter->__offset = 0;
miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);
/**
* sg_miter_next - proceed mapping iterator to the next mapping
* @miter: sg mapping iter to proceed
*
* Description:
* Proceeds @miter@ to the next mapping. @miter@ should have been
* started using sg_miter_start(). On successful return,
* @miter@->page, @miter@->addr and @miter@->length point to the
* current mapping.
*
* Context:
* IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till
* @miter@ is stopped. May sleep if !SG_MITER_ATOMIC.
*
* Returns:
* true if @miter contains the next mapping. false if end of sg
* list is reached.
*/
bool sg_miter_next(struct sg_mapping_iter *miter)
{
unsigned int off, len;
/* check for end and drop resources from the last iteration */
if (!miter->__nents)
return false;
sg_miter_stop(miter);
/* get to the next sg if necessary.  __offset is adjusted by stop */
if (miter->__offset == miter->__sg->length && --miter->__nents) {
miter->__sg = sg_next(miter->__sg);
miter->__offset = 0;
}
/* map the next page */
/* off may span several pages when the sg entry does; pick the page
 * containing the current offset and clamp length to that page. */
off = miter->__sg->offset + miter->__offset;
len = miter->__sg->length - miter->__offset;
miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
off &= ~PAGE_MASK;
miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
/* Default: whole mapped area consumed; caller may shrink ->consumed. */
miter->consumed = miter->length;
if (miter->__flags & SG_MITER_ATOMIC)
miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
else
miter->addr = kmap(miter->page) + off;
return true;
}
EXPORT_SYMBOL(sg_miter_next);
/**
* sg_miter_stop - stop mapping iteration
* @miter: sg mapping iter to be stopped
*
* Description:
* Stops mapping iterator @miter. @miter should have been
* started using sg_miter_start(). A stopped iteration can be
* resumed by calling sg_miter_next() on it. This is useful when
* resources (kmap) need to be released during iteration.
*
* Context:
* IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
*/
void sg_miter_stop(struct sg_mapping_iter *miter)
{
/* Consuming more than was mapped indicates a caller bug. */
WARN_ON(miter->consumed > miter->length);
/* drop resources from the last iteration */
if (miter->addr) {
/* Advance by what the caller actually consumed, so a resumed
 * iteration re-maps any unconsumed tail. */
miter->__offset += miter->consumed;
if (miter->__flags & SG_MITER_ATOMIC) {
WARN_ON(!irqs_disabled());
kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
} else
/* NOTE(review): kunmap() conventionally takes the struct page,
 * not the mapped address — verify this argument is correct. */
kunmap(miter->addr);
miter->page = NULL;
miter->addr = NULL;
miter->length = 0;
miter->consumed = 0;
}
}
/**
* sg_copy_buffer - Copy data between a linear buffer and an SG list
* @sgl: The SG list
@ -309,56 +420,29 @@ EXPORT_SYMBOL(sg_alloc_table);
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, int to_buffer)
{
struct scatterlist *sg;
size_t buf_off = 0;
int i;
unsigned int offset = 0;
struct sg_mapping_iter miter;
WARN_ON(!irqs_disabled());
sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
for_each_sg(sgl, sg, nents, i) {
struct page *page;
int n = 0;
unsigned int sg_off = sg->offset;
unsigned int sg_copy = sg->length;
while (sg_miter_next(&miter) && offset < buflen) {
unsigned int len;
if (sg_copy > buflen)
sg_copy = buflen;
buflen -= sg_copy;
len = min(miter.length, buflen - offset);
while (sg_copy > 0) {
unsigned int page_copy;
void *p;
page_copy = PAGE_SIZE - sg_off;
if (page_copy > sg_copy)
page_copy = sg_copy;
page = nth_page(sg_page(sg), n);
p = kmap_atomic(page, KM_BIO_SRC_IRQ);
if (to_buffer)
memcpy(buf + buf_off, p + sg_off, page_copy);
else {
memcpy(p + sg_off, buf + buf_off, page_copy);
flush_kernel_dcache_page(page);
}
kunmap_atomic(p, KM_BIO_SRC_IRQ);
buf_off += page_copy;
sg_off += page_copy;
if (sg_off == PAGE_SIZE) {
sg_off = 0;
n++;
}
sg_copy -= page_copy;
if (to_buffer)
memcpy(buf + offset, miter.addr, len);
else {
memcpy(miter.addr, buf + offset, len);
flush_kernel_dcache_page(miter.page);
}
if (!buflen)
break;
offset += len;
}
return buf_off;
sg_miter_stop(&miter);
return offset;
}
/**