I've had it with this code now. The packed command support is a complex hurdle in the MMC/SD block layer, around 500+ lines of code which was introduced in 2013 in commit ce39f9d17c ("mmc: support packed write command for eMMC4.5 devices") and commit abd9ac1449 ("mmc: add packed command feature of eMMC4.5"), and since then it has been rotting. The original author of the code has disappeared from the community and the mail address is bouncing. For the code to be exercised the host must flag that it supports packed commands, so in mmc_blk_prep_packed_list() which is called for every single request, the following construction appears: u8 max_packed_rw = 0; if ((rq_data_dir(cur) == WRITE) && mmc_host_packed_wr(card->host)) max_packed_rw = card->ext_csd.max_packed_writes; if (max_packed_rw == 0) goto no_packed; This has the following logical deductions: - Only WRITE commands can really be packed, so the solution is only half-done: we support packed WRITE but not packed READ. The packed command support has not been finalized by supporting reads in three years! - mmc_host_packed_wr() is just a static inline that checks host->caps2 & MMC_CAP2_PACKED_WR. The problem with this is that NO upstream host sets this capability flag! No driver in the kernel is using it, and we can't test it. Packed command may be supported in out-of-tree code, but I doubt it. I doubt that the code is even working anymore due to other refactorings in the MMC block layer; who would notice if patches affecting it broke packed commands? No one. - There is no Device Tree binding or code to mark a host as supporting packed read or write commands, just this flag in caps2, so for sure there are not any DT systems using it either. It has other problems as well: mmc_blk_prep_packed_list() is speculatively picking requests out of the request queue with blk_fetch_request(), making the MMC/SD stack harder to convert to the multiqueue block layer. By removing it we get rid of an obstacle. The way I see it, this is just cruft littering the MMC/SD stack.
("mmc: support packed write command for eMMC4.5 devices") commitabd9ac1449
("mmc: add packed command feature of eMMC4.5") ...and since then it has been rotting. The original author of the code has disappeared from the community and the mail address is bouncing. For the code to be exercised the host must flag that it supports packed commands, so in mmc_blk_prep_packed_list() which is called for every single request, the following construction appears: u8 max_packed_rw = 0; if ((rq_data_dir(cur) == WRITE) && mmc_host_packed_wr(card->host)) max_packed_rw = card->ext_csd.max_packed_writes; if (max_packed_rw == 0) goto no_packed; This has the following logical deductions: - Only WRITE commands can really be packed, so the solution is only half-done: we support packed WRITE but not packed READ. The packed command support has not been finalized by supporting reads in three years! - mmc_host_packed_wr() is just a static inline that checks host->caps2 & MMC_CAP2_PACKED_WR. The problem with this is that NO upstream host sets this capability flag! No driver in the kernel is using it, and we can't test it. Packed command may be supported in out-of-tree code, but I doubt it. I doubt that the code is even working anymore due to other refactorings in the MMC block layer, who would notice if patches affecting it broke packed commands? No one. - There is no Device Tree binding or code to mark a host as supporting packed read or write commands, just this flag in caps2, so for sure there are not any DT systems using it either. It has other problems as well: mmc_blk_prep_packed_list() is speculatively picking requests out of the request queue with blk_fetch_request() making the MMC/SD stack harder to convert to the multiqueue block layer. By this we get rid of an obstacle. The way I see it this is just cruft littering the MMC/SD stack. 
Cc: Namjae Jeon <namjae.jeon@samsung.com> Cc: Maya Erez <qca_merez@qca.qualcomm.com> Acked-by: Jaehoon Chung <jh80.chung@samsung.com> Signed-off-by: Linus Walleij <linus.walleij@linaro.org> Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
63 lines
1.5 KiB
C
63 lines
1.5 KiB
C
#ifndef MMC_QUEUE_H
|
|
#define MMC_QUEUE_H
|
|
|
|
static inline bool mmc_req_is_special(struct request *req)
|
|
{
|
|
return req &&
|
|
(req_op(req) == REQ_OP_FLUSH ||
|
|
req_op(req) == REQ_OP_DISCARD ||
|
|
req_op(req) == REQ_OP_SECURE_ERASE);
|
|
}
|
|
|
|
struct request;
|
|
struct task_struct;
|
|
struct mmc_blk_data;
|
|
|
|
/**
 * struct mmc_blk_request - commands and data for one block-layer request
 * @mrq: the mmc_request handed to the host controller driver
 * @sbc: pre-command slot (presumably CMD23 SET_BLOCK_COUNT — verify
 *       against the issuing code)
 * @cmd: the main command for the transfer
 * @stop: stop command sent after the data phase, when needed
 * @data: descriptor for the data portion of the transfer
 * @retune_retry_done: bookkeeping for retries after retuning
 *       (NOTE(review): exact retry semantics live in the .c file)
 *
 * Bundles everything needed to issue a single block I/O request to the
 * card as one MMC transaction.
 */
struct mmc_blk_request {
	struct mmc_request	mrq;
	struct mmc_command	sbc;
	struct mmc_command	cmd;
	struct mmc_command	stop;
	struct mmc_data		data;
	int			retune_retry_done;
};
|
/**
 * struct mmc_queue_req - per-slot state for an in-flight block request
 * @req: the block layer request currently occupying this slot
 * @brq: the MMC commands/data built to service @req
 * @sg: scatterlist mapped for the transfer
 * @bounce_buf: contiguous bounce buffer, when the host cannot do
 *       scatter/gather directly (NULL otherwise — see the bounce helpers
 *       declared below)
 * @bounce_sg: scatterlist describing the original pages to bounce
 * @bounce_sg_len: number of entries in @bounce_sg
 * @mmc_active: async request handle used for double-buffered issuing
 *
 * Two of these exist per queue (mqrq[2]) so one request can be prepared
 * while another is in flight.
 */
struct mmc_queue_req {
	struct request		*req;
	struct mmc_blk_request	brq;
	struct scatterlist	*sg;
	char			*bounce_buf;
	struct scatterlist	*bounce_sg;
	unsigned int		bounce_sg_len;
	struct mmc_async_req	mmc_active;
};
|
/**
 * struct mmc_queue - per-card request queue state
 * @card: the MMC/SD card this queue feeds
 * @thread: kernel thread that pulls requests off @queue and issues them
 * @thread_sem: semaphore coordinating with @thread
 * @flags: MMC_QUEUE_* state bits, see below
 * @blkdata: back-pointer to the owning block device data
 * @queue: the block layer request_queue
 * @mqrq: the two request slots (current and previous) used for
 *        asynchronous double buffering
 * @mqrq_cur: slot being prepared/issued now
 * @mqrq_prev: slot for the previously issued, possibly still in-flight,
 *        request
 */
struct mmc_queue {
	struct mmc_card		*card;
	struct task_struct	*thread;
	struct semaphore	thread_sem;
	unsigned int		flags;
#define MMC_QUEUE_SUSPENDED	(1 << 0)	/* queue processing is paused */
#define MMC_QUEUE_NEW_REQUEST	(1 << 1)	/* a new request arrived mid-issue */
	struct mmc_blk_data	*blkdata;
	struct request_queue	*queue;
	struct mmc_queue_req	mqrq[2];
	struct mmc_queue_req	*mqrq_cur;
	struct mmc_queue_req	*mqrq_prev;
};
|
/* Set up the queue, its kernel thread and slots for @card. */
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
			  const char *);
/* Tear down everything mmc_init_queue() created. */
extern void mmc_cleanup_queue(struct mmc_queue *);
/* Pause/resume processing of the queue (see MMC_QUEUE_SUSPENDED). */
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);

/* Map the request into the slot's scatterlist; returns the sg count. */
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
				     struct mmc_queue_req *);
/*
 * Copy data to/from the bounce buffer around a transfer, for hosts
 * that need a contiguous buffer (no-ops when bounce_buf is unused —
 * verify in the .c implementation).
 */
extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
extern void mmc_queue_bounce_post(struct mmc_queue_req *);

/* Nonzero when the queue currently targets the RPMB partition. */
extern int mmc_access_rpmb(struct mmc_queue *);

#endif
|