mmc: queue: Share mmc request array between partitions
eMMC can have multiple internal partitions that are represented as
separate disks / queues. However, switching between partitions is only
done when the queue is empty. Consequently the array of mmc requests
that are queued can be shared between partitions, saving memory.

Keep a pointer to the mmc request queue on the card, and use that
instead of allocating a new one for each partition.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
parent cdf8a6fb48
commit 7b410d074b
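The idea is compact enough to model outside the kernel. The sketch below is a plain userspace C program, not kernel code: the field names and the queue depth of 2 mirror the patch, while everything else (the *_model types, alloc_shared_queue, init_queue, the two partition variables) is illustrative. One request-slot array lives on the card; each partition's queue borrows it, which is safe because partitions are only switched while the queue is empty.

#include <stdio.h>
#include <stdlib.h>

struct mmc_queue_req { void *sg; };       /* stand-in for the real per-slot state */

struct mmc_card_model {
	struct mmc_queue_req *mqrq;       /* the shared slot array */
	int qdepth;
};

struct mmc_queue_model {                  /* one of these per partition */
	struct mmc_queue_req *mqrq;       /* borrowed from the card, not owned */
	int qdepth;
};

static int alloc_shared_queue(struct mmc_card_model *card, int qdepth)
{
	card->mqrq = calloc(qdepth, sizeof(*card->mqrq));
	if (!card->mqrq)
		return -1;
	card->qdepth = qdepth;
	return 0;
}

static void init_queue(struct mmc_queue_model *mq, struct mmc_card_model *card)
{
	/* Sharing is safe because partitions are only switched while the
	 * queue is empty, so two partitions never use a slot concurrently. */
	mq->mqrq = card->mqrq;
	mq->qdepth = card->qdepth;
}

int main(void)
{
	struct mmc_card_model card = { 0 };
	struct mmc_queue_model main_area, boot_area;

	if (alloc_shared_queue(&card, 2))     /* the patch uses qdepth = 2 */
		return 1;
	init_queue(&main_area, &card);
	init_queue(&boot_area, &card);        /* same array, no extra memory */
	printf("shared: %d\n", main_area.mqrq == boot_area.mqrq);
	free(card.mqrq);                      /* freed once, by the card */
	return 0;
}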
@@ -2123,6 +2123,7 @@ static int mmc_blk_probe(struct mmc_card *card)
 {
 	struct mmc_blk_data *md, *part_md;
 	char cap_str[10];
+	int ret;
 
 	/*
 	 * Check that the card supports the command class(es) we need.
@@ -2132,9 +2133,15 @@ static int mmc_blk_probe(struct mmc_card *card)
 
 	mmc_fixup_device(card, mmc_blk_fixups);
 
+	ret = mmc_queue_alloc_shared_queue(card);
+	if (ret)
+		return ret;
+
 	md = mmc_blk_alloc(card);
-	if (IS_ERR(md))
+	if (IS_ERR(md)) {
+		mmc_queue_free_shared_queue(card);
 		return PTR_ERR(md);
+	}
 
 	string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
 			cap_str, sizeof(cap_str));
@@ -2172,6 +2179,7 @@ static int mmc_blk_probe(struct mmc_card *card)
  out:
 	mmc_blk_remove_parts(card, md);
 	mmc_blk_remove_req(md);
+	mmc_queue_free_shared_queue(card);
 	return 0;
 }
 
@@ -2189,6 +2197,7 @@ static void mmc_blk_remove(struct mmc_card *card)
 	pm_runtime_put_noidle(&card->dev);
 	mmc_blk_remove_req(md);
 	dev_set_drvdata(&card->dev, NULL);
+	mmc_queue_free_shared_queue(card);
 }
 
 static int _mmc_blk_suspend(struct mmc_card *card)
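Taken together, the four block-driver hunks above pair one allocation in mmc_blk_probe() with a free on every exit path: probe failure, the "out" error label, and mmc_blk_remove(). A minimal sketch of that acquire/unwind shape, with all names as hypothetical stand-ins:

#include <stdlib.h>

struct card { void *mqrq; };

static int  alloc_shared(struct card *c) { c->mqrq = malloc(64); return c->mqrq ? 0 : -1; }
static void free_shared(struct card *c)  { free(c->mqrq); c->mqrq = NULL; }
static void *blk_alloc(void)             { return malloc(32); } /* models mmc_blk_alloc() */

static int probe(struct card *card)
{
	void *md;
	int ret;

	ret = alloc_shared(card);          /* acquire first ... */
	if (ret)
		return ret;

	md = blk_alloc();
	if (!md) {
		free_shared(card);         /* ... release on every failure path */
		return -1;
	}

	free(md);
	free_shared(card);                 /* and again on remove/teardown */
	return 0;
}

int main(void) { struct card c = { 0 }; return probe(&c); }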
@@ -149,17 +149,13 @@ static void mmc_request_fn(struct request_queue *q)
 		wake_up_process(mq->thread);
 }
 
-static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+static struct scatterlist *mmc_alloc_sg(int sg_len)
 {
 	struct scatterlist *sg;
 
 	sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
-	if (!sg)
-		*err = -ENOMEM;
-	else {
-		*err = 0;
+	if (sg)
 		sg_init_table(sg, sg_len);
-	}
 
 	return sg;
 }
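The mmc_alloc_sg() rework above swaps out-parameter error reporting for a plain NULL return, since -ENOMEM was the only error it could produce. A runnable userspace approximation (calloc stands in for kmalloc_array; sg_init_table is reduced to a comment):

#include <stdlib.h>

struct scatterlist { void *page_link; unsigned int length; };

/* Failure is now signalled by the return value alone, so callers write
 * "if (!sg) return -ENOMEM;" instead of threading an int *err around. */
static struct scatterlist *alloc_sg(int sg_len)
{
	struct scatterlist *sg = calloc(sg_len, sizeof(*sg));

	/* the kernel helper calls sg_init_table(sg, sg_len) when sg != NULL */
	return sg;
}

int main(void)
{
	struct scatterlist *sg = alloc_sg(4);

	if (!sg)
		return -1;      /* the one error the old *err could report */
	free(sg);
	return 0;
}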
@@ -185,6 +181,32 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
+static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
+{
+	kfree(mqrq->bounce_sg);
+	mqrq->bounce_sg = NULL;
+
+	kfree(mqrq->sg);
+	mqrq->sg = NULL;
+
+	kfree(mqrq->bounce_buf);
+	mqrq->bounce_buf = NULL;
+}
+
+static void mmc_queue_reqs_free_bufs(struct mmc_queue_req *mqrq, int qdepth)
+{
+	int i;
+
+	for (i = 0; i < qdepth; i++)
+		mmc_queue_req_free_bufs(&mqrq[i]);
+}
+
+static void mmc_queue_free_mqrqs(struct mmc_queue_req *mqrq, int qdepth)
+{
+	mmc_queue_reqs_free_bufs(mqrq, qdepth);
+	kfree(mqrq);
+}
+
 static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
 {
 	struct mmc_queue_req *mqrq;
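A detail worth noting in these helpers: each kfree() is followed by nulling the pointer, so teardown is idempotent. That matters below, where the bounce-buffer allocator can fail partway through a slot and the error path simply frees all slots again. A small model of the pattern (free(NULL), like kfree(NULL), is a no-op):

#include <stdlib.h>

struct slot { void *bounce_buf, *sg, *bounce_sg; };

static void slot_free_bufs(struct slot *s)
{
	free(s->bounce_sg);  s->bounce_sg = NULL;
	free(s->sg);         s->sg = NULL;
	free(s->bounce_buf); s->bounce_buf = NULL;
}

int main(void)
{
	/* a slot whose allocation failed partway through */
	struct slot s = { malloc(16), malloc(16), NULL };

	slot_free_bufs(&s);   /* frees what exists, NULL members are no-ops */
	slot_free_bufs(&s);   /* calling it again is harmless */
	return 0;
}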
@@ -200,79 +222,137 @@ static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
 }
 
 #ifdef CONFIG_MMC_BLOCK_BOUNCE
-static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
-					unsigned int bouncesz)
+static int mmc_queue_alloc_bounce_bufs(struct mmc_queue_req *mqrq, int qdepth,
+				       unsigned int bouncesz)
 {
 	int i;
 
-	for (i = 0; i < mq->qdepth; i++) {
-		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-		if (!mq->mqrq[i].bounce_buf)
-			goto out_err;
+	for (i = 0; i < qdepth; i++) {
+		mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+		if (!mqrq[i].bounce_buf)
+			return -ENOMEM;
+
+		mqrq[i].sg = mmc_alloc_sg(1);
+		if (!mqrq[i].sg)
+			return -ENOMEM;
+
+		mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512);
+		if (!mqrq[i].bounce_sg)
+			return -ENOMEM;
 	}
 
-	return true;
+	return 0;
+}
 
-out_err:
-	while (--i >= 0) {
-		kfree(mq->mqrq[i].bounce_buf);
-		mq->mqrq[i].bounce_buf = NULL;
-	}
-	pr_warn("%s: unable to allocate bounce buffers\n",
-		mmc_card_name(mq->card));
-	return false;
+static bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq, int qdepth,
+				   unsigned int bouncesz)
+{
+	int ret;
+
+	ret = mmc_queue_alloc_bounce_bufs(mqrq, qdepth, bouncesz);
+	if (ret)
+		mmc_queue_reqs_free_bufs(mqrq, qdepth);
+
+	return !ret;
 }
 
-static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
-				      unsigned int bouncesz)
+static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
 {
-	int i, ret;
+	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
 
-	for (i = 0; i < mq->qdepth; i++) {
-		mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
-		if (ret)
-			return ret;
+	if (host->max_segs != 1)
+		return 0;
 
-		mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
-		if (ret)
-			return ret;
-	}
+	if (bouncesz > host->max_req_size)
+		bouncesz = host->max_req_size;
+	if (bouncesz > host->max_seg_size)
+		bouncesz = host->max_seg_size;
+	if (bouncesz > host->max_blk_count * 512)
+		bouncesz = host->max_blk_count * 512;
+
+	if (bouncesz <= 512)
+		return 0;
+
+	return bouncesz;
+}
+#else
+static inline bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq,
+					  int qdepth, unsigned int bouncesz)
+{
+	return false;
+}
 
+static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
+{
 	return 0;
 }
 #endif
 
-static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
+static int mmc_queue_alloc_sgs(struct mmc_queue_req *mqrq, int qdepth,
+			       int max_segs)
 {
-	int i, ret;
+	int i;
 
-	for (i = 0; i < mq->qdepth; i++) {
-		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
-		if (ret)
-			return ret;
+	for (i = 0; i < qdepth; i++) {
+		mqrq[i].sg = mmc_alloc_sg(max_segs);
+		if (!mqrq[i].sg)
+			return -ENOMEM;
 	}
 
 	return 0;
 }
 
-static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
+void mmc_queue_free_shared_queue(struct mmc_card *card)
 {
-	kfree(mqrq->bounce_sg);
-	mqrq->bounce_sg = NULL;
-
-	kfree(mqrq->sg);
-	mqrq->sg = NULL;
-
-	kfree(mqrq->bounce_buf);
-	mqrq->bounce_buf = NULL;
+	if (card->mqrq) {
+		mmc_queue_free_mqrqs(card->mqrq, card->qdepth);
+		card->mqrq = NULL;
+	}
 }
 
-static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
+static int __mmc_queue_alloc_shared_queue(struct mmc_card *card, int qdepth)
 {
-	int i;
+	struct mmc_host *host = card->host;
+	struct mmc_queue_req *mqrq;
+	unsigned int bouncesz;
+	int ret = 0;
 
-	for (i = 0; i < mq->qdepth; i++)
-		mmc_queue_req_free_bufs(&mq->mqrq[i]);
+	if (card->mqrq)
+		return -EINVAL;
+
+	mqrq = mmc_queue_alloc_mqrqs(qdepth);
+	if (!mqrq)
+		return -ENOMEM;
+
+	card->mqrq = mqrq;
+	card->qdepth = qdepth;
+
+	bouncesz = mmc_queue_calc_bouncesz(host);
+
+	if (bouncesz && !mmc_queue_alloc_bounce(mqrq, qdepth, bouncesz)) {
+		bouncesz = 0;
+		pr_warn("%s: unable to allocate bounce buffers\n",
+			mmc_card_name(card));
+	}
+
+	card->bouncesz = bouncesz;
+
+	if (!bouncesz) {
+		ret = mmc_queue_alloc_sgs(mqrq, qdepth, host->max_segs);
+		if (ret)
+			goto out_err;
+	}
+
+	return ret;
+
+out_err:
+	mmc_queue_free_shared_queue(card);
+	return ret;
+}
+
+int mmc_queue_alloc_shared_queue(struct mmc_card *card)
+{
+	return __mmc_queue_alloc_shared_queue(card, 2);
 }
 
 /**
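mmc_queue_calc_bouncesz() above distils the old inline bounce-buffer sizing into a pure function: bounce only for single-segment hosts, clamp the starting size against every host limit, and give up when no more than one 512-byte block would fit. A runnable rendering of the same arithmetic; the 64 KiB starting value matches the MMC_QUEUE_BOUNCESZ default, while the sample host numbers are made up:

#include <stdio.h>

#define MMC_QUEUE_BOUNCESZ 65536  /* default bounce size used as the start */

struct host_caps {
	unsigned int max_segs, max_req_size, max_seg_size, max_blk_count;
};

static unsigned int calc_bouncesz(const struct host_caps *h)
{
	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;

	if (h->max_segs != 1)     /* bouncing only helps single-segment hosts */
		return 0;

	if (bouncesz > h->max_req_size)
		bouncesz = h->max_req_size;
	if (bouncesz > h->max_seg_size)
		bouncesz = h->max_seg_size;
	if (bouncesz > h->max_blk_count * 512)
		bouncesz = h->max_blk_count * 512;

	return bouncesz <= 512 ? 0 : bouncesz;   /* not worth one block */
}

int main(void)
{
	struct host_caps pio_host = { 1, 65536, 65536, 32 };

	/* clamped by max_blk_count: 32 blocks * 512 = 16384 bytes */
	printf("%u\n", calc_bouncesz(&pio_host));
	return 0;
}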
@@ -289,7 +369,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 {
 	struct mmc_host *host = card->host;
 	u64 limit = BLK_BOUNCE_HIGH;
-	bool bounce = false;
 	int ret = -ENOMEM;
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
@@ -300,10 +379,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (!mq->queue)
 		return -ENOMEM;
 
-	mq->qdepth = 2;
-	mq->mqrq = mmc_queue_alloc_mqrqs(mq->qdepth);
-	if (!mq->mqrq)
-		goto blk_cleanup;
+	mq->mqrq = card->mqrq;
+	mq->qdepth = card->qdepth;
 	mq->queue->queuedata = mq;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
@@ -312,44 +389,17 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
 
-#ifdef CONFIG_MMC_BLOCK_BOUNCE
-	if (host->max_segs == 1) {
-		unsigned int bouncesz;
-
-		bouncesz = MMC_QUEUE_BOUNCESZ;
-
-		if (bouncesz > host->max_req_size)
-			bouncesz = host->max_req_size;
-		if (bouncesz > host->max_seg_size)
-			bouncesz = host->max_seg_size;
-		if (bouncesz > (host->max_blk_count * 512))
-			bouncesz = host->max_blk_count * 512;
-
-		if (bouncesz > 512 &&
-		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
-			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
-			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
-			blk_queue_max_segments(mq->queue, bouncesz / 512);
-			blk_queue_max_segment_size(mq->queue, bouncesz);
-
-			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
-			if (ret)
-				goto cleanup_queue;
-			bounce = true;
-		}
-	}
-#endif
-
-	if (!bounce) {
+	if (card->bouncesz) {
+		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
+		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
+		blk_queue_max_segments(mq->queue, card->bouncesz / 512);
+		blk_queue_max_segment_size(mq->queue, card->bouncesz);
+	} else {
 		blk_queue_bounce_limit(mq->queue, limit);
 		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_segments(mq->queue, host->max_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-
-		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
-		if (ret)
-			goto cleanup_queue;
 	}
 
 	sema_init(&mq->thread_sem, 1);
@@ -364,11 +414,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 
 	return 0;
 
 cleanup_queue:
-	mmc_queue_reqs_free_bufs(mq);
-	kfree(mq->mqrq);
 	mq->mqrq = NULL;
-blk_cleanup:
 	blk_cleanup_queue(mq->queue);
 	return ret;
 }
@@ -390,10 +437,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	blk_start_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	mmc_queue_reqs_free_bufs(mq);
-	kfree(mq->mqrq);
 	mq->mqrq = NULL;
-
 	mq->card = NULL;
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
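After this change, the per-partition teardown in mmc_cleanup_queue() only detaches from the shared array; the card is the single owner and releases it from mmc_blk_remove() via mmc_queue_free_shared_queue(). The ownership split, modelled in a few lines of plain C (all names illustrative):

#include <stdlib.h>

struct card  { void *mqrq; };         /* owner of the slot array */
struct queue { void *mqrq; };         /* per-partition borrower  */

static void cleanup_queue(struct queue *mq)
{
	mq->mqrq = NULL;              /* detach only, never free */
}

static void free_shared_queue(struct card *card)
{
	free(card->mqrq);             /* the one place memory is released */
	card->mqrq = NULL;
}

int main(void)
{
	struct card card = { malloc(64) };
	struct queue q = { card.mqrq };

	cleanup_queue(&q);            /* partition goes away: no free here */
	free_shared_queue(&card);     /* card goes away: free exactly once */
	return 0;
}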
@@ -51,6 +51,8 @@ struct mmc_queue {
 	unsigned long		qslots;
 };
 
+extern int mmc_queue_alloc_shared_queue(struct mmc_card *card);
+extern void mmc_queue_free_shared_queue(struct mmc_card *card);
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
 			  const char *);
 extern void mmc_cleanup_queue(struct mmc_queue *);
@@ -208,6 +208,7 @@ struct sdio_cis {
 struct mmc_host;
 struct sdio_func;
 struct sdio_func_tuple;
+struct mmc_queue_req;
 
 #define SDIO_MAX_FUNCS		7
 
@@ -300,6 +301,10 @@ struct mmc_card {
 	struct dentry		*debugfs_root;
 	struct mmc_part	part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
 	unsigned int    nr_parts;
+
+	struct mmc_queue_req	*mqrq;		/* Shared queue structure */
+	unsigned int		bouncesz;	/* Bounce buffer size */
+	int			qdepth;		/* Shared queue depth */
 };
 
 static inline bool mmc_large_sector(struct mmc_card *card)