forked from Minki/linux
7b410d074b
eMMC can have multiple internal partitions that are represented as separate disks / queues. However switching between partitions is only done when the queue is empty. Consequently the array of mmc requests that are queued can be shared between partitions saving memory. Keep a pointer to the mmc request queue on the card, and use that instead of allocating a new one for each partition. Signed-off-by: Adrian Hunter <adrian.hunter@intel.com> Reviewed-by: Linus Walleij <linus.walleij@linaro.org> Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
74 lines
1.8 KiB
C
74 lines
1.8 KiB
C
#ifndef MMC_QUEUE_H
|
|
#define MMC_QUEUE_H
|
|
|
|
#include <linux/types.h>
|
|
#include <linux/blkdev.h>
|
|
#include <linux/mmc/core.h>
|
|
#include <linux/mmc/host.h>
|
|
|
|
static inline bool mmc_req_is_special(struct request *req)
|
|
{
|
|
return req &&
|
|
(req_op(req) == REQ_OP_FLUSH ||
|
|
req_op(req) == REQ_OP_DISCARD ||
|
|
req_op(req) == REQ_OP_SECURE_ERASE);
|
|
}
|
|
|
|
struct task_struct;
|
|
struct mmc_blk_data;
|
|
|
|
/*
 * All the MMC command/data descriptors needed to carry out one block-layer
 * request, kept together so they can be built once and handed to the core
 * as a single struct mmc_request.
 */
struct mmc_blk_request {
	struct mmc_request mrq;		/* the request submitted to the MMC core */
	struct mmc_command sbc;		/* SET_BLOCK_COUNT (precedes the transfer) */
	struct mmc_command cmd;		/* the data transfer command itself */
	struct mmc_command stop;	/* STOP_TRANSMISSION for open-ended transfers */
	struct mmc_data data;		/* data buffer / sg description */
	int retune_retry_done;		/* non-zero once a retune retry was attempted */
};
|
|
|
|
/*
 * Per-slot state for one in-flight block request: the block-layer request,
 * the MMC descriptors built for it, the scatterlist mapping its pages, and
 * an optional bounce buffer (used when the host controller cannot do
 * scatter/gather directly -- NOTE(review): presumed from the *bounce* names;
 * confirm against the queue.c implementation).
 */
struct mmc_queue_req {
	struct request *req;		/* block-layer request being serviced */
	struct mmc_blk_request brq;	/* MMC commands/data built for @req */
	struct scatterlist *sg;		/* sg list mapping the request's buffers */
	char *bounce_buf;		/* contiguous bounce buffer, or NULL */
	struct scatterlist *bounce_sg;	/* sg list over the original pages */
	unsigned int bounce_sg_len;	/* number of entries in @bounce_sg */
	struct mmc_async_req areq;	/* handle for asynchronous issuing */
	int task_id;			/* index of this slot in mmc_queue::mqrq */
};
|
|
|
|
/*
 * One request queue, instantiated per eMMC internal partition.  The mqrq
 * slot array is shared between all partitions of a card (switching between
 * partitions only happens when the queue is empty), so @mqrq points at
 * storage owned by the card rather than by this queue.
 */
struct mmc_queue {
	struct mmc_card *card;		/* card this queue issues requests to */
	struct task_struct *thread;	/* kernel thread servicing the queue */
	struct semaphore thread_sem;
	bool suspended;			/* queue stopped by mmc_queue_suspend() */
	bool asleep;			/* issuing thread is idle/waiting */
	struct mmc_blk_data *blkdata;	/* back-pointer to the block driver data */
	struct request_queue *queue;	/* the block-layer request queue */
	struct mmc_queue_req *mqrq;	/* shared per-card slot array (see above) */
	int qdepth;			/* number of slots in @mqrq */
	int qcnt;			/* number of slots currently in use */
	unsigned long qslots;		/* bitmap of allocated slots */
};
|
|
|
|
/*
 * Allocate/free the mmc_queue_req slot array that all of @card's partition
 * queues share; returns 0 or a negative errno.
 */
extern int mmc_queue_alloc_shared_queue(struct mmc_card *card);
extern void mmc_queue_free_shared_queue(struct mmc_card *card);

/* Set up / tear down one partition's queue (block queue + issuing thread). */
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
			  const char *);
extern void mmc_cleanup_queue(struct mmc_queue *);

/* Stop/restart dispatch of new requests, e.g. around system suspend. */
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);

/* Map a slot's request into its scatterlist; returns the sg entry count. */
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
				     struct mmc_queue_req *);
/* Copy data to/from the bounce buffer before/after a bounced transfer. */
extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
extern void mmc_queue_bounce_post(struct mmc_queue_req *);

/* Non-zero when this queue targets the RPMB partition. */
extern int mmc_access_rpmb(struct mmc_queue *);

/*
 * Claim a free slot from the shared mqrq array for @req / release it again.
 * mmc_queue_req_find() returns NULL when no slot is available.
 * NOTE(review): return-NULL semantics presumed from the find/free pairing;
 * confirm in queue.c.
 */
extern struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *,
						struct request *);
extern void mmc_queue_req_free(struct mmc_queue *, struct mmc_queue_req *);

#endif
|