1e8e55b670
Add CQE support to the block driver, including:
 - optionally using DCMD for flush requests
 - "manually" issuing discard requests
 - issuing read / write requests to the CQE
 - supporting block-layer timeouts
 - handling recovery
 - supporting re-tuning

CQE offers 25% - 50% better random multi-threaded I/O. There is a slight
(e.g. 2%) drop in sequential read speed but no observable change to
sequential write.

CQE automatically sends the commands to complete requests. However, it only
supports reads / writes and so-called "direct commands" (DCMD). Furthermore,
DCMD is limited to one command at a time, whereas a discard requires 3
commands. That makes issuing discards through CQE very awkward, and some
CQEs don't support DCMD anyway. So for discards the existing non-CQE
approach is taken, where the mmc core code issues the 3 commands one at a
time, i.e. mmc_erase(). DCMD is used for issuing flushes.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
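To make the split described in the commit message concrete, here is a minimal sketch of the issue-path decision: flushes may be sent to the CQE as a DCMD, discards fall back to the non-CQE path where mmc_erase() issues the three commands one at a time, and reads / writes are queued on the CQE. This is not the driver's actual code; example_cqe_issue and the inline enum definition are illustrative placeholders (the real enum mmc_issued is defined elsewhere in the driver).

#include <linux/blkdev.h>

/* Placeholder only: the driver defines the real enum mmc_issued elsewhere. */
enum mmc_issued { MMC_REQ_STARTED, MMC_REQ_BUSY, MMC_REQ_FAILED_TO_START, MMC_REQ_FINISHED };

struct mmc_queue;

static enum mmc_issued example_cqe_issue(struct mmc_queue *mq, struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_FLUSH:
		/* DCMD is limited to one command at a time, which suits a flush */
		return MMC_REQ_STARTED;		/* a DCMD flush would be queued here */
	case REQ_OP_DISCARD:
		/*
		 * A discard needs 3 commands, so take the non-CQE path where
		 * the mmc core issues them one at a time via mmc_erase().
		 */
		return MMC_REQ_FINISHED;	/* the erase path would run here */
	default:
		/* Reads and writes are queued directly on the CQE */
		return MMC_REQ_STARTED;		/* the read / write would be queued here */
	}
}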
22 lines, 476 B, C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MMC_CORE_BLOCK_H
#define _MMC_CORE_BLOCK_H

struct mmc_queue;
struct request;

void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);

void mmc_blk_cqe_recovery(struct mmc_queue *mq);

enum mmc_issued;

enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req);
void mmc_blk_mq_complete(struct request *req);

struct work_struct;

void mmc_blk_mq_complete_work(struct work_struct *work);

#endif
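As a usage illustration of the declarations above, the sketch below shows how a blk-mq .queue_rq handler might hand a request to mmc_blk_mq_issue_rq() and map the result onto a blk_status_t. The handler name is hypothetical and the MMC_REQ_* values follow the placeholder enum from the earlier sketch; the real queue glue does considerably more (timeouts, recovery and re-tuning, as the commit message notes).

#include <linux/blk-mq.h>

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct mmc_queue *mq = hctx->queue->queuedata;
	struct request *req = bd->rq;

	blk_mq_start_request(req);

	switch (mmc_blk_mq_issue_rq(mq, req)) {
	case MMC_REQ_STARTED:
		/* Completion arrives later; mmc_blk_mq_complete() is declared above for that path */
		return BLK_STS_OK;
	case MMC_REQ_BUSY:
		/* Ask the block layer to retry the request later */
		return BLK_STS_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}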