From 5f3a86014eadbcf559ab64cf26ce29510319228b Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 8 Nov 2017 15:34:54 +0900 Subject: [PATCH 01/96] mmc: slot-gpio: call gpiod_to_irq() only when MMC_CAP_NEEDS_POLL is unset It is not efficient to call gpiod_to_irq() regardless the flag, then ignore the returned irq if MMC_CAP_NEEDS_POLL. Move gpiod_to_irq() after the MMC_CAP_NEEDS_POLL check. Signed-off-by: Masahiro Yamada Signed-off-by: Ulf Hansson --- drivers/mmc/core/slot-gpio.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index 863f1dbbfc1b..f7c6e0542de7 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -121,20 +121,18 @@ EXPORT_SYMBOL(mmc_gpio_request_ro); void mmc_gpiod_request_cd_irq(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; - int ret, irq; + int irq = -EINVAL; + int ret; if (host->slot.cd_irq >= 0 || !ctx || !ctx->cd_gpio) return; - irq = gpiod_to_irq(ctx->cd_gpio); - /* - * Even if gpiod_to_irq() returns a valid IRQ number, the platform might - * still prefer to poll, e.g., because that IRQ number is already used - * by another unit and cannot be shared. + * Do not use IRQ if the platform prefers to poll, e.g., because that + * IRQ number is already used by another unit and cannot be shared. */ - if (irq >= 0 && host->caps & MMC_CAP_NEEDS_POLL) - irq = -EINVAL; + if (!(host->caps & MMC_CAP_NEEDS_POLL)) + irq = gpiod_to_irq(ctx->cd_gpio); if (irq >= 0) { if (!ctx->cd_gpio_isr) From 97618aca1440b5addc5c3d78659d3e176be23b80 Mon Sep 17 00:00:00 2001 From: "yinbo.zhu" Date: Wed, 8 Nov 2017 17:09:50 +0800 Subject: [PATCH 02/96] mmc: sdhci-of-esdhc: fix eMMC couldn't work after kexec The bit eSDHC_TBCTL[TB_EN] couldn't be reset by eSDHC_SYSCTL[RSTA] which is used to reset for all. The driver should make sure it's cleared before card initialization, otherwise the initialization would fail. Signed-off-by: yinbo.zhu Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-of-esdhc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 1f424374bbbb..d74030f3bd12 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -785,6 +785,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) pltfm_host = sdhci_priv(host); esdhc = sdhci_pltfm_priv(pltfm_host); + val = sdhci_readl(host, ESDHC_TBCTL); + val &= ~ESDHC_TB_EN; + sdhci_writel(host, val, ESDHC_TBCTL); + host_ver = sdhci_readw(host, SDHCI_HOST_VERSION); esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; From 96455380ece1c786f0a8822ea8b312e6445a3d93 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Tue, 14 Nov 2017 23:55:20 +0100 Subject: [PATCH 03/96] mmc: core: use usleep_range rather than HZ magic in mmc_delay() Documentation/timers/timers-howto.txt recommends to use usleep_range for delays 1-20ms. Let's adhere to it. No need for messing with HZ and still do busy looping these days. 
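With HZ=100 the old code busy-waited via mdelay() for any delay shorter than 10 ms (1000 / HZ). A condensed view of the resulting helper from the hunk below, with a fixed 20 ms threshold (comments here are illustrative annotations, not part of the patch):

	static inline void mmc_delay(unsigned int ms)
	{
		if (ms <= 20)
			/* short delay: hrtimer-backed sleep with ~25% slack */
			usleep_range(ms * 1000, ms * 1250);
		else
			/* longer delay: jiffies-granularity sleep is fine */
			msleep(ms);
	}
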
Signed-off-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/core/core.h | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 71e6c6d7ceb7..b2877e2d740f 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -62,12 +62,10 @@ void mmc_set_initial_state(struct mmc_host *host); static inline void mmc_delay(unsigned int ms) { - if (ms < 1000 / HZ) { - cond_resched(); - mdelay(ms); - } else { + if (ms <= 20) + usleep_range(ms * 1000, ms * 1250); + else msleep(ms); - } } void mmc_rescan(struct work_struct *work); From 754febcc6b749bb05ebb06b0b9cfdda6157e8cfd Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Tue, 14 Nov 2017 23:51:04 +0100 Subject: [PATCH 04/96] mmc: tmio: use usleep_range consistently There are a few udelay() left which are in a range that they should be usleep_range() these days. Signed-off-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/tmio_mmc_core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 583bf3262df5..d6ca57be16c2 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -806,7 +806,7 @@ static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) if (ret == 0) set_bit(i, host->taps); - mdelay(1); + usleep_range(1000, 1200); } ret = host->select_tuning(host); @@ -958,7 +958,7 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd) * 100us were not enough. Is this the same 140us delay, as in * tmio_mmc_set_ios()? */ - udelay(200); + usleep_range(200, 300); } /* * It seems, VccQ should be switched on after Vcc, this is also what the @@ -966,7 +966,7 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd) */ if (!IS_ERR(mmc->supply.vqmmc) && !ret) { ret = regulator_enable(mmc->supply.vqmmc); - udelay(200); + usleep_range(200, 300); } if (ret < 0) @@ -1059,7 +1059,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } /* Let things settle. delay taken from winCE driver */ - udelay(140); + usleep_range(140, 200); if (PTR_ERR(host->mrq) == -EINTR) dev_dbg(&host->pdev->dev, "%s.%d: IOS interrupted: clk %u, mode %u", From 448f2f8775d1e8a62a14506b6da38bcedce5eb22 Mon Sep 17 00:00:00 2001 From: Ulrich Hecht Date: Wed, 15 Nov 2017 16:25:49 +0100 Subject: [PATCH 05/96] dt-bindings: mmc: renesas_sdhi: Add r8a77995 support Adds bindings for the R-Car D3 SoC's SDHI IP. 
Signed-off-by: Ulrich Hecht Reviewed-by: Geert Uytterhoeven Reviewed-by: Simon Horman Signed-off-by: Ulf Hansson --- Documentation/devicetree/bindings/mmc/tmio_mmc.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt index 3c6762430fd9..d8685cb83325 100644 --- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt +++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt @@ -26,6 +26,7 @@ Required properties: "renesas,sdhi-r8a7794" - SDHI IP on R8A7794 SoC "renesas,sdhi-r8a7795" - SDHI IP on R8A7795 SoC "renesas,sdhi-r8a7796" - SDHI IP on R8A7796 SoC + "renesas,sdhi-r8a77995" - SDHI IP on R8A77995 SoC "renesas,sdhi-shmobile" - a generic sh-mobile SDHI controller "renesas,rcar-gen1-sdhi" - a generic R-Car Gen1 SDHI controller "renesas,rcar-gen2-sdhi" - a generic R-Car Gen2 or RZ/G1 From 1907e38680af8f492f3da20cb36e3f33cfd971bf Mon Sep 17 00:00:00 2001 From: Adam Borowski Date: Tue, 28 Nov 2017 04:44:55 +0100 Subject: [PATCH 06/96] mmc: sunxi: fix mojibake in module metadata It had an U+FFFD: not a corrupted character but a literal well-formed replacement marker. Signed-off-by: Adam Borowski Acked-by: Maxime Ripard Signed-off-by: Ulf Hansson --- drivers/mmc/host/sunxi-mmc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index cc98355dbdb9..8fef5c17696e 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -3,7 +3,7 @@ * (C) Copyright 2007-2011 Reuuimlla Technology Co., Ltd. * (C) Copyright 2007-2011 Aaron Maoye * (C) Copyright 2013-2014 O2S GmbH - * (C) Copyright 2013-2014 David Lanzend�rfer + * (C) Copyright 2013-2014 David Lanzendörfer * (C) Copyright 2013-2014 Hans de Goede * (C) Copyright 2017 Sootech SA * @@ -1393,5 +1393,5 @@ module_platform_driver(sunxi_mmc_driver); MODULE_DESCRIPTION("Allwinner's SD/MMC Card Controller Driver"); MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("David Lanzend�rfer "); +MODULE_AUTHOR("David Lanzendörfer "); MODULE_ALIAS("platform:sunxi-mmc"); From 4512bd370b111dd7ffc437ddd3179391df68fe1b Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:40:58 +0200 Subject: [PATCH 07/96] mmc: block: No need to export mmc_cleanup_queue() mmc_cleanup_queue() is not used by a different module. Do not export it. Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Signed-off-by: Ulf Hansson Tested-by: Linus Walleij --- drivers/mmc/core/queue.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 4f33d277b125..26f8da30ebe5 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -270,7 +270,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq) mq->card = NULL; } -EXPORT_SYMBOL(mmc_cleanup_queue); /** * mmc_queue_suspend - suspend a MMC request queue From 41e3efd07d5a02c80f503e29d755aa1bbb4245de Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:40:59 +0200 Subject: [PATCH 08/96] mmc: block: Simplify cleaning up the queue Use blk_cleanup_queue() to shutdown the queue when the driver is removed, and instead get an extra reference to the queue to prevent the queue being freed before the final mmc_blk_put(). 
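A condensed view of the resulting queue lifetime, pieced together from the hunks below (these are the relevant call sites from this patch lined up in order, not a standalone function; comments are illustrative annotations):

	/* mmc_blk_alloc_req(): take an extra reference so the queue outlives removal */
	if (!blk_get_queue(md->queue.queue)) {
		mmc_cleanup_queue(&md->queue);
		goto err_putdisk;
	}

	/* mmc_blk_remove_req(): stop accepting requests and shut the queue down */
	blk_set_queue_dying(md->queue.queue);
	mmc_cleanup_queue(&md->queue);	/* now ends with blk_cleanup_queue(q) */

	/* mmc_blk_put(), last user gone: drop the extra reference, freeing the queue */
	blk_put_queue(md->queue.queue);
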
Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Signed-off-by: Ulf Hansson Tested-by: Linus Walleij --- drivers/mmc/core/block.c | 17 ++++++++++++----- drivers/mmc/core/queue.c | 2 ++ 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index ccfa98af1dd3..e44f6d90aeb4 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -189,7 +189,7 @@ static void mmc_blk_put(struct mmc_blk_data *md) md->usage--; if (md->usage == 0) { int devidx = mmc_get_devidx(md->disk); - blk_cleanup_queue(md->queue.queue); + blk_put_queue(md->queue.queue); ida_simple_remove(&mmc_blk_ida, devidx); put_disk(md->disk); kfree(md); @@ -2156,6 +2156,17 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, md->queue.blkdata = md; + /* + * Keep an extra reference to the queue so that we can shutdown the + * queue (i.e. call blk_cleanup_queue()) while there are still + * references to the 'md'. The corresponding blk_put_queue() is in + * mmc_blk_put(). + */ + if (!blk_get_queue(md->queue.queue)) { + mmc_cleanup_queue(&md->queue); + goto err_putdisk; + } + md->disk->major = MMC_BLOCK_MAJOR; md->disk->first_minor = devidx * perdev_minors; md->disk->fops = &mmc_bdops; @@ -2471,10 +2482,6 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md) * from being accepted. */ card = md->queue.card; - spin_lock_irq(md->queue.queue->queue_lock); - queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue); - spin_unlock_irq(md->queue.queue->queue_lock); - blk_set_queue_dying(md->queue.queue); mmc_cleanup_queue(&md->queue); if (md->disk->flags & GENHD_FL_UP) { device_remove_file(disk_to_dev(md->disk), &md->force_ro); diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 26f8da30ebe5..ae6d9da68735 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -268,6 +268,8 @@ void mmc_cleanup_queue(struct mmc_queue *mq) blk_start_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); + blk_cleanup_queue(q); + mq->card = NULL; } From afab1bb8b40c61458e009fdc323c9740f95fcd5b Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:00 +0200 Subject: [PATCH 09/96] mmc: core: Make mmc_pre_req() and mmc_post_req() available Make mmc_pre_req() and mmc_post_req() available to the card drivers. Later patches will make use of this. Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Signed-off-by: Ulf Hansson Tested-by: Linus Walleij --- drivers/mmc/core/core.c | 31 ------------------------------- drivers/mmc/core/core.h | 31 +++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 1f0f44f4dd5f..7ca6e4866a8b 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -657,37 +657,6 @@ bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq) } EXPORT_SYMBOL(mmc_is_req_done); -/** - * mmc_pre_req - Prepare for a new request - * @host: MMC host to prepare command - * @mrq: MMC request to prepare for - * - * mmc_pre_req() is called in prior to mmc_start_req() to let - * host prepare for the new request. Preparation of a request may be - * performed while another request is running on the host. 
- */ -static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq) -{ - if (host->ops->pre_req) - host->ops->pre_req(host, mrq); -} - -/** - * mmc_post_req - Post process a completed request - * @host: MMC host to post process command - * @mrq: MMC request to post process for - * @err: Error, if non zero, clean up any resources made in pre_req - * - * Let the host post process a completed request. Post processing of - * a request may be performed while another reuqest is running. - */ -static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq, - int err) -{ - if (host->ops->post_req) - host->ops->post_req(host, mrq, err); -} - /** * mmc_finalize_areq() - finalize an asynchronous request * @host: MMC host to finalize any ongoing request on diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index b2877e2d740f..3e3d21304e5f 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -150,4 +150,35 @@ int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq); void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq); int mmc_cqe_recovery(struct mmc_host *host); +/** + * mmc_pre_req - Prepare for a new request + * @host: MMC host to prepare command + * @mrq: MMC request to prepare for + * + * mmc_pre_req() is called in prior to mmc_start_req() to let + * host prepare for the new request. Preparation of a request may be + * performed while another request is running on the host. + */ +static inline void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq) +{ + if (host->ops->pre_req) + host->ops->pre_req(host, mrq); +} + +/** + * mmc_post_req - Post process a completed request + * @host: MMC host to post process command + * @mrq: MMC request to post process for + * @err: Error, if non zero, clean up any resources made in pre_req + * + * Let the host post process a completed request. Post processing of + * a request may be performed while another request is running. + */ +static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq, + int err) +{ + if (host->ops->post_req) + host->ops->post_req(host, mrq, err); +} + #endif From 6d3898a6a517d0effa1d1e337c03b16bafb6fc96 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:01 +0200 Subject: [PATCH 10/96] mmc: block: Add error-handling comments Add error-handling comments to explain what would also be done for blk-mq if it used the legacy error-handling. Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Signed-off-by: Ulf Hansson Tested-by: Linus Walleij --- drivers/mmc/core/block.c | 36 +++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index e44f6d90aeb4..7dcd5d5b203b 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1911,7 +1911,11 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) case MMC_BLK_SUCCESS: case MMC_BLK_PARTIAL: /* - * A block was successfully transferred. + * Reset success, and accept bytes_xfered. For + * MMC_BLK_PARTIAL re-submit the remaining request. For + * MMC_BLK_SUCCESS error out the remaining request (it + * could not be re-submitted anyway if a next request + * had already begun). */ mmc_blk_reset_success(md, type); @@ -1931,6 +1935,14 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) } break; case MMC_BLK_CMD_ERR: + /* + * For SD cards, get bytes written, but do not accept + * bytes_xfered if that fails. 
For MMC cards accept + * bytes_xfered. Then try to reset. If reset fails then + * error out the remaining request, otherwise retry + * once (N.B mmc_blk_reset() will not succeed twice in a + * row). + */ req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending); if (mmc_blk_reset(md, card->host, type)) { if (req_pending) @@ -1947,11 +1959,20 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) } break; case MMC_BLK_RETRY: + /* + * Do not accept bytes_xfered, but retry up to 5 times, + * otherwise same as abort. + */ retune_retry_done = brq->retune_retry_done; if (retry++ < 5) break; /* Fall through */ case MMC_BLK_ABORT: + /* + * Do not accept bytes_xfered, but try to reset. If + * reset succeeds, try once more, otherwise error out + * the request. + */ if (!mmc_blk_reset(md, card->host, type)) break; mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq); @@ -1960,6 +1981,13 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) case MMC_BLK_DATA_ERR: { int err; + /* + * Do not accept bytes_xfered, but try to reset. If + * reset succeeds, try once more. If reset fails with + * ENODEV which means the partition is wrong, then error + * out the request. Otherwise attempt to read one sector + * at a time. + */ err = mmc_blk_reset(md, card->host, type); if (!err) break; @@ -1971,6 +1999,10 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) /* Fall through */ } case MMC_BLK_ECC_ERR: + /* + * Do not accept bytes_xfered. If reading more than one + * sector, try reading one sector at a time. + */ if (brq->data.blocks > 1) { /* Redo read one sector at a time */ pr_warn("%s: retrying using single block read\n", @@ -1992,10 +2024,12 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) } break; case MMC_BLK_NOMEDIUM: + /* Do not accept bytes_xfered. Error out the request */ mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq); mmc_blk_rw_try_restart(mq, new_req, mqrq_cur); return; default: + /* Do not accept bytes_xfered. Error out the request */ pr_err("%s: Unhandled return value (%d)", old_req->rq_disk->disk_name, status); mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq); From c3d53d0da69d127f488dc85638e9440220b268e8 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:02 +0200 Subject: [PATCH 11/96] mmc: core: Add parameter use_blk_mq Until mmc has blk-mq support fully implemented and tested, add a parameter use_blk_mq, set to true if config option MMC_MQ_DEFAULT is selected, which it is by default. Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Signed-off-by: Ulf Hansson Tested-by: Linus Walleij --- drivers/mmc/Kconfig | 10 ++++++++++ drivers/mmc/core/core.c | 7 +++++++ drivers/mmc/core/core.h | 2 ++ drivers/mmc/core/host.c | 2 ++ drivers/mmc/core/host.h | 4 ++++ include/linux/mmc/host.h | 1 + 6 files changed, 26 insertions(+) diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig index ec21388311db..42565562577c 100644 --- a/drivers/mmc/Kconfig +++ b/drivers/mmc/Kconfig @@ -12,6 +12,16 @@ menuconfig MMC If you want MMC/SD/SDIO support, you should say Y here and also to your specific host controller driver. +config MMC_MQ_DEFAULT + bool "MMC: use blk-mq I/O path by default" + depends on MMC && BLOCK + default y + ---help--- + This option enables the new blk-mq based I/O path for MMC block + devices by default. With the option the mmc_core.use_blk_mq + module/boot option defaults to Y, without it to N, but it can + still be overridden either way. 
+ if MMC source "drivers/mmc/core/Kconfig" diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 7ca6e4866a8b..617802f45386 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -66,6 +66,13 @@ static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; bool use_spi_crc = 1; module_param(use_spi_crc, bool, 0); +#ifdef CONFIG_MMC_MQ_DEFAULT +bool mmc_use_blk_mq = true; +#else +bool mmc_use_blk_mq = false; +#endif +module_param_named(use_blk_mq, mmc_use_blk_mq, bool, S_IWUSR | S_IRUGO); + static int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay) { diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 3e3d21304e5f..136617d2f971 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -35,6 +35,8 @@ struct mmc_bus_ops { int (*reset)(struct mmc_host *); }; +extern bool mmc_use_blk_mq; + void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); void mmc_detach_bus(struct mmc_host *host); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 64b03d6eaf18..409a68a96a0a 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -404,6 +404,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) host->fixed_drv_type = -EINVAL; + host->use_blk_mq = mmc_use_blk_mq; + return host; } diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h index fb689a1065ed..6eaf558e62d6 100644 --- a/drivers/mmc/core/host.h +++ b/drivers/mmc/core/host.h @@ -74,6 +74,10 @@ static inline bool mmc_card_hs400es(struct mmc_card *card) return card->host->ios.enhanced_strobe; } +static inline bool mmc_host_use_blk_mq(struct mmc_host *host) +{ + return host->use_blk_mq; +} #endif diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index e7743eca1021..ce2075d6f429 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -380,6 +380,7 @@ struct mmc_host { unsigned int doing_retune:1; /* re-tuning in progress */ unsigned int retune_now:1; /* do re-tuning at next req */ unsigned int retune_paused:1; /* re-tuning is temporarily disabled */ + unsigned int use_blk_mq:1; /* use blk-mq */ int rescan_disable; /* disable card detection */ int rescan_entered; /* used with nonremovable devices */ From 81196976ed946cbf36bb41ddda402853c7df7cfa Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:03 +0200 Subject: [PATCH 12/96] mmc: block: Add blk-mq support Define and use a blk-mq queue. Discards and flushes are processed synchronously, but reads and writes asynchronously. In order to support slow DMA unmapping, DMA unmapping is not done until after the next request is started. That means the request is not completed until then. If there is no next request then the completion is done by queued work. 
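The deferred unmap/complete described above is visible in mmc_blk_mq_issue_rw_rq() in the hunks below; a condensed sketch with error paths trimmed (function names as in this patch, comments are illustrative annotations):

	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
	mqrq->brq.mrq.done = mmc_blk_mq_req_done;

	mmc_pre_req(host, &mqrq->brq.mrq);

	/* wait for any request in flight to signal completion ... */
	err = mmc_blk_rw_wait(mq, &prev_req);

	mq->rw_wait = true;
	err = mmc_start_request(host, &mqrq->brq.mrq);

	/* ... and only unmap/complete it after the next request has started */
	if (prev_req)
		mmc_blk_mq_post_req(mq, prev_req);
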
Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Signed-off-by: Ulf Hansson Tested-by: Linus Walleij --- drivers/mmc/core/block.c | 502 ++++++++++++++++++++++++++++++++++++++- drivers/mmc/core/block.h | 9 + drivers/mmc/core/queue.c | 320 ++++++++++++++++++++++--- drivers/mmc/core/queue.h | 32 +++ 4 files changed, 820 insertions(+), 43 deletions(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 7dcd5d5b203b..7874c3bbf6b5 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1220,6 +1220,14 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) md->reset_done &= ~type; } +static void mmc_blk_end_request(struct request *req, blk_status_t error) +{ + if (req->mq_ctx) + blk_mq_end_request(req, error); + else + blk_end_request_all(req, error); +} + /* * The non-block commands come back from the block layer after it queued it and * processed it with all other requests and then they get issued in this @@ -1281,7 +1289,7 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) break; } mq_rq->drv_op_result = ret; - blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK); + mmc_blk_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK); } static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) @@ -1324,7 +1332,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) else mmc_blk_reset_success(md, type); fail: - blk_end_request(req, status, blk_rq_bytes(req)); + mmc_blk_end_request(req, status); } static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, @@ -1394,7 +1402,7 @@ out_retry: if (!err) mmc_blk_reset_success(md, type); out: - blk_end_request(req, status, blk_rq_bytes(req)); + mmc_blk_end_request(req, status); } static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) @@ -1404,7 +1412,7 @@ static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) int ret = 0; ret = mmc_flush_cache(card); - blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK); + mmc_blk_end_request(req, ret ? 
BLK_STS_IOERR : BLK_STS_OK); } /* @@ -1481,11 +1489,9 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq) } } -static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, - struct mmc_async_req *areq) +static enum mmc_blk_status __mmc_blk_err_check(struct mmc_card *card, + struct mmc_queue_req *mq_mrq) { - struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, - areq); struct mmc_blk_request *brq = &mq_mrq->brq; struct request *req = mmc_queue_req_to_req(mq_mrq); int need_retune = card->host->need_retune; @@ -1591,6 +1597,15 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, return MMC_BLK_SUCCESS; } +static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, + struct mmc_async_req *areq) +{ + struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, + areq); + + return __mmc_blk_err_check(card, mq_mrq); +} + static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, int disable_multi, bool *do_rel_wr_p, bool *do_data_tag_p) @@ -1783,6 +1798,477 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, mqrq->areq.err_check = mmc_blk_err_check; } +#define MMC_MAX_RETRIES 5 +#define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1) + +#define MMC_READ_SINGLE_RETRIES 2 + +/* Single sector read during recovery */ +static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) +{ + struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); + struct mmc_request *mrq = &mqrq->brq.mrq; + struct mmc_card *card = mq->card; + struct mmc_host *host = card->host; + blk_status_t error = BLK_STS_OK; + int retries = 0; + + do { + u32 status; + int err; + + mmc_blk_rw_rq_prep(mqrq, card, 1, mq); + + mmc_wait_for_req(host, mrq); + + err = mmc_send_status(card, &status); + if (err) + goto error_exit; + + if (!mmc_host_is_spi(host) && + R1_CURRENT_STATE(status) != R1_STATE_TRAN) { + u32 stop_status = 0; + bool gen_err = false; + + err = send_stop(card, + DIV_ROUND_UP(mrq->data->timeout_ns, + 1000000), + req, &gen_err, &stop_status); + if (err) + goto error_exit; + } + + if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES) + continue; + + retries = 0; + + if (mrq->cmd->error || + mrq->data->error || + (!mmc_host_is_spi(host) && + (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS))) + error = BLK_STS_IOERR; + else + error = BLK_STS_OK; + + } while (blk_update_request(req, error, 512)); + + return; + +error_exit: + mrq->data->bytes_xfered = 0; + blk_update_request(req, BLK_STS_IOERR, 512); + /* Let it try the remaining request again */ + if (mqrq->retries > MMC_MAX_RETRIES - 1) + mqrq->retries = MMC_MAX_RETRIES - 1; +} + +static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req) +{ + int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; + struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); + struct mmc_blk_request *brq = &mqrq->brq; + struct mmc_blk_data *md = mq->blkdata; + struct mmc_card *card = mq->card; + static enum mmc_blk_status status; + + brq->retune_retry_done = mqrq->retries; + + status = __mmc_blk_err_check(card, mqrq); + + mmc_retune_release(card->host); + + /* + * Requests are completed by mmc_blk_mq_complete_rq() which sets simple + * policy: + * 1. A request that has transferred at least some data is considered + * successful and will be requeued if there is remaining data to + * transfer. + * 2. Otherwise the number of retries is incremented and the request + * will be requeued if there are remaining retries. + * 3. 
Otherwise the request will be errored out. + * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and + * mqrq->retries. So there are only 4 possible actions here: + * 1. do not accept the bytes_xfered value i.e. set it to zero + * 2. change mqrq->retries to determine the number of retries + * 3. try to reset the card + * 4. read one sector at a time + */ + switch (status) { + case MMC_BLK_SUCCESS: + case MMC_BLK_PARTIAL: + /* Reset success, and accept bytes_xfered */ + mmc_blk_reset_success(md, type); + break; + case MMC_BLK_CMD_ERR: + /* + * For SD cards, get bytes written, but do not accept + * bytes_xfered if that fails. For MMC cards accept + * bytes_xfered. Then try to reset. If reset fails then + * error out the remaining request, otherwise retry + * once (N.B mmc_blk_reset() will not succeed twice in a + * row). + */ + if (mmc_card_sd(card)) { + u32 blocks; + int err; + + err = mmc_sd_num_wr_blocks(card, &blocks); + if (err) + brq->data.bytes_xfered = 0; + else + brq->data.bytes_xfered = blocks << 9; + } + if (mmc_blk_reset(md, card->host, type)) + mqrq->retries = MMC_NO_RETRIES; + else + mqrq->retries = MMC_MAX_RETRIES - 1; + break; + case MMC_BLK_RETRY: + /* + * Do not accept bytes_xfered, but retry up to 5 times, + * otherwise same as abort. + */ + brq->data.bytes_xfered = 0; + if (mqrq->retries < MMC_MAX_RETRIES) + break; + /* Fall through */ + case MMC_BLK_ABORT: + /* + * Do not accept bytes_xfered, but try to reset. If + * reset succeeds, try once more, otherwise error out + * the request. + */ + brq->data.bytes_xfered = 0; + if (mmc_blk_reset(md, card->host, type)) + mqrq->retries = MMC_NO_RETRIES; + else + mqrq->retries = MMC_MAX_RETRIES - 1; + break; + case MMC_BLK_DATA_ERR: { + int err; + + /* + * Do not accept bytes_xfered, but try to reset. If + * reset succeeds, try once more. If reset fails with + * ENODEV which means the partition is wrong, then error + * out the request. Otherwise attempt to read one sector + * at a time. + */ + brq->data.bytes_xfered = 0; + err = mmc_blk_reset(md, card->host, type); + if (!err) { + mqrq->retries = MMC_MAX_RETRIES - 1; + break; + } + if (err == -ENODEV) { + mqrq->retries = MMC_NO_RETRIES; + break; + } + /* Fall through */ + } + case MMC_BLK_ECC_ERR: + /* + * Do not accept bytes_xfered. If reading more than one + * sector, try reading one sector at a time. + */ + brq->data.bytes_xfered = 0; + /* FIXME: Missing single sector read for large sector size */ + if (brq->data.blocks > 1 && !mmc_large_sector(card)) { + /* Redo read one sector at a time */ + pr_warn("%s: retrying using single block read\n", + req->rq_disk->disk_name); + mmc_blk_read_single(mq, req); + } else { + mqrq->retries = MMC_NO_RETRIES; + } + break; + case MMC_BLK_NOMEDIUM: + /* Do not accept bytes_xfered. Error out the request */ + brq->data.bytes_xfered = 0; + mqrq->retries = MMC_NO_RETRIES; + break; + default: + /* Do not accept bytes_xfered. 
Error out the request */ + brq->data.bytes_xfered = 0; + mqrq->retries = MMC_NO_RETRIES; + pr_err("%s: Unhandled return value (%d)", + req->rq_disk->disk_name, status); + break; + } +} + +static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req) +{ + struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); + unsigned int nr_bytes = mqrq->brq.data.bytes_xfered; + + if (nr_bytes) { + if (blk_update_request(req, BLK_STS_OK, nr_bytes)) + blk_mq_requeue_request(req, true); + else + __blk_mq_end_request(req, BLK_STS_OK); + } else if (!blk_rq_bytes(req)) { + __blk_mq_end_request(req, BLK_STS_IOERR); + } else if (mqrq->retries++ < MMC_MAX_RETRIES) { + blk_mq_requeue_request(req, true); + } else { + if (mmc_card_removed(mq->card)) + req->rq_flags |= RQF_QUIET; + blk_mq_end_request(req, BLK_STS_IOERR); + } +} + +static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq, + struct mmc_queue_req *mqrq) +{ + return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) && + (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT || + mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT); +} + +static void mmc_blk_urgent_bkops(struct mmc_queue *mq, + struct mmc_queue_req *mqrq) +{ + if (mmc_blk_urgent_bkops_needed(mq, mqrq)) + mmc_start_bkops(mq->card, true); +} + +void mmc_blk_mq_complete(struct request *req) +{ + struct mmc_queue *mq = req->q->queuedata; + + mmc_blk_mq_complete_rq(mq, req); +} + +static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, + struct request *req) +{ + struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); + + mmc_blk_mq_rw_recovery(mq, req); + + mmc_blk_urgent_bkops(mq, mqrq); +} + +static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) +{ + struct request_queue *q = req->q; + unsigned long flags; + bool put_card; + + spin_lock_irqsave(q->queue_lock, flags); + + mq->in_flight[mmc_issue_type(mq, req)] -= 1; + + put_card = (mmc_tot_in_flight(mq) == 0); + + spin_unlock_irqrestore(q->queue_lock, flags); + + if (put_card) + mmc_put_card(mq->card, &mq->ctx); +} + +static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req) +{ + struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); + struct mmc_request *mrq = &mqrq->brq.mrq; + struct mmc_host *host = mq->card->host; + + mmc_post_req(host, mrq, 0); + + blk_mq_complete_request(req); + + mmc_blk_mq_dec_in_flight(mq, req); +} + +static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq, + struct request **prev_req) +{ + mutex_lock(&mq->complete_lock); + + if (!mq->complete_req) + goto out_unlock; + + mmc_blk_mq_poll_completion(mq, mq->complete_req); + + if (prev_req) + *prev_req = mq->complete_req; + else + mmc_blk_mq_post_req(mq, mq->complete_req); + + mq->complete_req = NULL; + +out_unlock: + mutex_unlock(&mq->complete_lock); +} + +void mmc_blk_mq_complete_work(struct work_struct *work) +{ + struct mmc_queue *mq = container_of(work, struct mmc_queue, + complete_work); + + mmc_blk_mq_complete_prev_req(mq, NULL); +} + +static void mmc_blk_mq_req_done(struct mmc_request *mrq) +{ + struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req, + brq.mrq); + struct request *req = mmc_queue_req_to_req(mqrq); + struct request_queue *q = req->q; + struct mmc_queue *mq = q->queuedata; + unsigned long flags; + bool waiting; + + /* + * We cannot complete the request in this context, so record that there + * is a request to complete, and that a following request does not need + * to wait (although it does need to complete complete_req first). 
+ */ + spin_lock_irqsave(q->queue_lock, flags); + mq->complete_req = req; + mq->rw_wait = false; + waiting = mq->waiting; + spin_unlock_irqrestore(q->queue_lock, flags); + + /* + * If 'waiting' then the waiting task will complete this request, + * otherwise queue a work to do it. Note that complete_work may still + * race with the dispatch of a following request. + */ + if (waiting) + wake_up(&mq->wait); + else + kblockd_schedule_work(&mq->complete_work); +} + +static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err) +{ + struct request_queue *q = mq->queue; + unsigned long flags; + bool done; + + /* + * Wait while there is another request in progress. Also indicate that + * there is a request waiting to start. + */ + spin_lock_irqsave(q->queue_lock, flags); + done = !mq->rw_wait; + mq->waiting = !done; + spin_unlock_irqrestore(q->queue_lock, flags); + + return done; +} + +static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req) +{ + int err = 0; + + wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err)); + + /* Always complete the previous request if there is one */ + mmc_blk_mq_complete_prev_req(mq, prev_req); + + return err; +} + +static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq, + struct request *req) +{ + struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); + struct mmc_host *host = mq->card->host; + struct request *prev_req = NULL; + int err = 0; + + mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq); + + mqrq->brq.mrq.done = mmc_blk_mq_req_done; + + mmc_pre_req(host, &mqrq->brq.mrq); + + err = mmc_blk_rw_wait(mq, &prev_req); + if (err) + goto out_post_req; + + mq->rw_wait = true; + + err = mmc_start_request(host, &mqrq->brq.mrq); + + if (prev_req) + mmc_blk_mq_post_req(mq, prev_req); + + if (err) { + mq->rw_wait = false; + mmc_retune_release(host); + } + +out_post_req: + if (err) + mmc_post_req(host, &mqrq->brq.mrq, err); + + return err; +} + +static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host) +{ + return mmc_blk_rw_wait(mq, NULL); +} + +enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req) +{ + struct mmc_blk_data *md = mq->blkdata; + struct mmc_card *card = md->queue.card; + struct mmc_host *host = card->host; + int ret; + + ret = mmc_blk_part_switch(card, md->part_type); + if (ret) + return MMC_REQ_FAILED_TO_START; + + switch (mmc_issue_type(mq, req)) { + case MMC_ISSUE_SYNC: + ret = mmc_blk_wait_for_idle(mq, host); + if (ret) + return MMC_REQ_BUSY; + switch (req_op(req)) { + case REQ_OP_DRV_IN: + case REQ_OP_DRV_OUT: + mmc_blk_issue_drv_op(mq, req); + break; + case REQ_OP_DISCARD: + mmc_blk_issue_discard_rq(mq, req); + break; + case REQ_OP_SECURE_ERASE: + mmc_blk_issue_secdiscard_rq(mq, req); + break; + case REQ_OP_FLUSH: + mmc_blk_issue_flush(mq, req); + break; + default: + WARN_ON_ONCE(1); + return MMC_REQ_FAILED_TO_START; + } + return MMC_REQ_FINISHED; + case MMC_ISSUE_ASYNC: + switch (req_op(req)) { + case REQ_OP_READ: + case REQ_OP_WRITE: + ret = mmc_blk_mq_issue_rw_rq(mq, req); + break; + default: + WARN_ON_ONCE(1); + ret = -EINVAL; + } + if (!ret) + return MMC_REQ_STARTED; + return ret == -EBUSY ? 
MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START; + default: + WARN_ON_ONCE(1); + return MMC_REQ_FAILED_TO_START; + } +} + static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, struct mmc_blk_request *brq, struct request *req, bool old_req_pending) diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h index 5946636101ef..6d34e87b18f6 100644 --- a/drivers/mmc/core/block.h +++ b/drivers/mmc/core/block.h @@ -7,4 +7,13 @@ struct request; void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req); +enum mmc_issued; + +enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req); +void mmc_blk_mq_complete(struct request *req); + +struct work_struct; + +void mmc_blk_mq_complete_work(struct work_struct *work); + #endif diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index ae6d9da68735..54bec4c6c9bd 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -22,6 +22,7 @@ #include "block.h" #include "core.h" #include "card.h" +#include "host.h" /* * Prepare a MMC request. This just filters out odd stuff. @@ -34,10 +35,25 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) return BLKPREP_KILL; req->rq_flags |= RQF_DONTPREP; + req_to_mmc_queue_req(req)->retries = 0; return BLKPREP_OK; } +enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req) +{ + if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE) + return MMC_ISSUE_ASYNC; + + return MMC_ISSUE_SYNC; +} + +static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req, + bool reserved) +{ + return BLK_EH_RESET_TIMER; +} + static int mmc_queue_thread(void *d) { struct mmc_queue *mq = d; @@ -154,11 +170,10 @@ static void mmc_queue_setup_discard(struct request_queue *q, * @req: the request * @gfp: memory allocation policy */ -static int mmc_init_request(struct request_queue *q, struct request *req, - gfp_t gfp) +static int __mmc_init_request(struct mmc_queue *mq, struct request *req, + gfp_t gfp) { struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req); - struct mmc_queue *mq = q->queuedata; struct mmc_card *card = mq->card; struct mmc_host *host = card->host; @@ -169,6 +184,12 @@ static int mmc_init_request(struct request_queue *q, struct request *req, return 0; } +static int mmc_init_request(struct request_queue *q, struct request *req, + gfp_t gfp) +{ + return __mmc_init_request(q->queuedata, req, gfp); +} + static void mmc_exit_request(struct request_queue *q, struct request *req) { struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req); @@ -177,6 +198,112 @@ static void mmc_exit_request(struct request_queue *q, struct request *req) mq_rq->sg = NULL; } +static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req, + unsigned int hctx_idx, unsigned int numa_node) +{ + return __mmc_init_request(set->driver_data, req, GFP_KERNEL); +} + +static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req, + unsigned int hctx_idx) +{ + struct mmc_queue *mq = set->driver_data; + + mmc_exit_request(mq->queue, req); +} + +/* + * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests + * will not be dispatched in parallel. 
+ */ +static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + struct request *req = bd->rq; + struct request_queue *q = req->q; + struct mmc_queue *mq = q->queuedata; + struct mmc_card *card = mq->card; + enum mmc_issue_type issue_type; + enum mmc_issued issued; + bool get_card; + int ret; + + if (mmc_card_removed(mq->card)) { + req->rq_flags |= RQF_QUIET; + return BLK_STS_IOERR; + } + + issue_type = mmc_issue_type(mq, req); + + spin_lock_irq(q->queue_lock); + + switch (issue_type) { + case MMC_ISSUE_ASYNC: + break; + default: + /* + * Timeouts are handled by mmc core, and we don't have a host + * API to abort requests, so we can't handle the timeout anyway. + * However, when the timeout happens, blk_mq_complete_request() + * no longer works (to stop the request disappearing under us). + * To avoid racing with that, set a large timeout. + */ + req->timeout = 600 * HZ; + break; + } + + mq->in_flight[issue_type] += 1; + get_card = (mmc_tot_in_flight(mq) == 1); + + spin_unlock_irq(q->queue_lock); + + if (!(req->rq_flags & RQF_DONTPREP)) { + req_to_mmc_queue_req(req)->retries = 0; + req->rq_flags |= RQF_DONTPREP; + } + + if (get_card) + mmc_get_card(card, &mq->ctx); + + blk_mq_start_request(req); + + issued = mmc_blk_mq_issue_rq(mq, req); + + switch (issued) { + case MMC_REQ_BUSY: + ret = BLK_STS_RESOURCE; + break; + case MMC_REQ_FAILED_TO_START: + ret = BLK_STS_IOERR; + break; + default: + ret = BLK_STS_OK; + break; + } + + if (issued != MMC_REQ_STARTED) { + bool put_card = false; + + spin_lock_irq(q->queue_lock); + mq->in_flight[issue_type] -= 1; + if (mmc_tot_in_flight(mq) == 0) + put_card = true; + spin_unlock_irq(q->queue_lock); + if (put_card) + mmc_put_card(card, &mq->ctx); + } + + return ret; +} + +static const struct blk_mq_ops mmc_mq_ops = { + .queue_rq = mmc_mq_queue_rq, + .init_request = mmc_mq_init_request, + .exit_request = mmc_mq_exit_request, + .complete = mmc_blk_mq_complete, + .timeout = mmc_mq_timed_out, +}; + static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) { struct mmc_host *host = card->host; @@ -198,6 +325,70 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) /* Initialize thread_sem even if it is not used */ sema_init(&mq->thread_sem, 1); + + INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work); + + mutex_init(&mq->complete_lock); + + init_waitqueue_head(&mq->wait); +} + +static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth, + const struct blk_mq_ops *mq_ops, spinlock_t *lock) +{ + int ret; + + memset(&mq->tag_set, 0, sizeof(mq->tag_set)); + mq->tag_set.ops = mq_ops; + mq->tag_set.queue_depth = q_depth; + mq->tag_set.numa_node = NUMA_NO_NODE; + mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE | + BLK_MQ_F_BLOCKING; + mq->tag_set.nr_hw_queues = 1; + mq->tag_set.cmd_size = sizeof(struct mmc_queue_req); + mq->tag_set.driver_data = mq; + + ret = blk_mq_alloc_tag_set(&mq->tag_set); + if (ret) + return ret; + + mq->queue = blk_mq_init_queue(&mq->tag_set); + if (IS_ERR(mq->queue)) { + ret = PTR_ERR(mq->queue); + goto free_tag_set; + } + + mq->queue->queue_lock = lock; + mq->queue->queuedata = mq; + + return 0; + +free_tag_set: + blk_mq_free_tag_set(&mq->tag_set); + + return ret; +} + +/* Set queue depth to get a reasonable value for q->nr_requests */ +#define MMC_QUEUE_DEPTH 64 + +static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card, + spinlock_t *lock) +{ + int q_depth; + int ret; + + q_depth = MMC_QUEUE_DEPTH; + + ret = 
mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock); + if (ret) + return ret; + + blk_queue_rq_timeout(mq->queue, 60 * HZ); + + mmc_setup_queue(mq, card); + + return 0; } /** @@ -216,6 +407,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, int ret = -ENOMEM; mq->card = card; + + if (mmc_host_use_blk_mq(host)) + return mmc_mq_init(mq, card, lock); + mq->queue = blk_alloc_queue(GFP_KERNEL); if (!mq->queue) return -ENOMEM; @@ -251,37 +446,24 @@ cleanup_queue: return ret; } -void mmc_cleanup_queue(struct mmc_queue *mq) +static void mmc_mq_queue_suspend(struct mmc_queue *mq) { - struct request_queue *q = mq->queue; - unsigned long flags; + blk_mq_quiesce_queue(mq->queue); - /* Make sure the queue isn't suspended, as that will deadlock */ - mmc_queue_resume(mq); - - /* Then terminate our worker thread */ - kthread_stop(mq->thread); - - /* Empty the queue */ - spin_lock_irqsave(q->queue_lock, flags); - q->queuedata = NULL; - blk_start_queue(q); - spin_unlock_irqrestore(q->queue_lock, flags); - - blk_cleanup_queue(q); - - mq->card = NULL; + /* + * The host remains claimed while there are outstanding requests, so + * simply claiming and releasing here ensures there are none. + */ + mmc_claim_host(mq->card->host); + mmc_release_host(mq->card->host); } -/** - * mmc_queue_suspend - suspend a MMC request queue - * @mq: MMC queue to suspend - * - * Stop the block request queue, and wait for our thread to - * complete any outstanding requests. This ensures that we - * won't suspend while a request is being processed. - */ -void mmc_queue_suspend(struct mmc_queue *mq) +static void mmc_mq_queue_resume(struct mmc_queue *mq) +{ + blk_mq_unquiesce_queue(mq->queue); +} + +static void __mmc_queue_suspend(struct mmc_queue *mq) { struct request_queue *q = mq->queue; unsigned long flags; @@ -297,11 +479,7 @@ void mmc_queue_suspend(struct mmc_queue *mq) } } -/** - * mmc_queue_resume - resume a previously suspended MMC request queue - * @mq: MMC queue to resume - */ -void mmc_queue_resume(struct mmc_queue *mq) +static void __mmc_queue_resume(struct mmc_queue *mq) { struct request_queue *q = mq->queue; unsigned long flags; @@ -317,6 +495,78 @@ void mmc_queue_resume(struct mmc_queue *mq) } } +void mmc_cleanup_queue(struct mmc_queue *mq) +{ + struct request_queue *q = mq->queue; + unsigned long flags; + + if (q->mq_ops) { + /* + * The legacy code handled the possibility of being suspended, + * so do that here too. + */ + if (blk_queue_quiesced(q)) + blk_mq_unquiesce_queue(q); + goto out_cleanup; + } + + /* Make sure the queue isn't suspended, as that will deadlock */ + mmc_queue_resume(mq); + + /* Then terminate our worker thread */ + kthread_stop(mq->thread); + + /* Empty the queue */ + spin_lock_irqsave(q->queue_lock, flags); + q->queuedata = NULL; + blk_start_queue(q); + spin_unlock_irqrestore(q->queue_lock, flags); + +out_cleanup: + blk_cleanup_queue(q); + + /* + * A request can be completed before the next request, potentially + * leaving a complete_work with nothing to do. Such a work item might + * still be queued at this point. Flush it. + */ + flush_work(&mq->complete_work); + + mq->card = NULL; +} + +/** + * mmc_queue_suspend - suspend a MMC request queue + * @mq: MMC queue to suspend + * + * Stop the block request queue, and wait for our thread to + * complete any outstanding requests. This ensures that we + * won't suspend while a request is being processed. 
+ */ +void mmc_queue_suspend(struct mmc_queue *mq) +{ + struct request_queue *q = mq->queue; + + if (q->mq_ops) + mmc_mq_queue_suspend(mq); + else + __mmc_queue_suspend(mq); +} + +/** + * mmc_queue_resume - resume a previously suspended MMC request queue + * @mq: MMC queue to resume + */ +void mmc_queue_resume(struct mmc_queue *mq) +{ + struct request_queue *q = mq->queue; + + if (q->mq_ops) + mmc_mq_queue_resume(mq); + else + __mmc_queue_resume(mq); +} + /* * Prepare the sg list(s) to be handed of to the host driver */ diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index 547b457c4251..ce9249852f26 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h @@ -8,6 +8,19 @@ #include #include +enum mmc_issued { + MMC_REQ_STARTED, + MMC_REQ_BUSY, + MMC_REQ_FAILED_TO_START, + MMC_REQ_FINISHED, +}; + +enum mmc_issue_type { + MMC_ISSUE_SYNC, + MMC_ISSUE_ASYNC, + MMC_ISSUE_MAX, +}; + static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq) { return blk_mq_rq_to_pdu(rq); @@ -57,12 +70,15 @@ struct mmc_queue_req { int drv_op_result; void *drv_op_data; unsigned int ioc_count; + int retries; }; struct mmc_queue { struct mmc_card *card; struct task_struct *thread; struct semaphore thread_sem; + struct mmc_ctx ctx; + struct blk_mq_tag_set tag_set; bool suspended; bool asleep; struct mmc_blk_data *blkdata; @@ -74,6 +90,14 @@ struct mmc_queue { * associated mmc_queue_req data. */ int qcnt; + + int in_flight[MMC_ISSUE_MAX]; + bool rw_wait; + bool waiting; + wait_queue_head_t wait; + struct request *complete_req; + struct mutex complete_lock; + struct work_struct complete_work; }; extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *, @@ -84,4 +108,12 @@ extern void mmc_queue_resume(struct mmc_queue *); extern unsigned int mmc_queue_map_sg(struct mmc_queue *, struct mmc_queue_req *); +enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req); + +static inline int mmc_tot_in_flight(struct mmc_queue *mq) +{ + return mq->in_flight[MMC_ISSUE_SYNC] + + mq->in_flight[MMC_ISSUE_ASYNC]; +} + #endif From 1e8e55b67030c6a2fef893d428bdcd611f73705c Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:04 +0200 Subject: [PATCH 13/96] mmc: block: Add CQE support Add CQE support to the block driver, including: - optionally using DCMD for flush requests - "manually" issuing discard requests - issuing read / write requests to the CQE - supporting block-layer timeouts - handling recovery - supporting re-tuning CQE offers 25% - 50% better random multi-threaded I/O. There is a slight (e.g. 2%) drop in sequential read speed but no observable change to sequential write. CQE automatically sends the commands to complete requests. However it only supports reads / writes and so-called "direct commands" (DCMD). Furthermore DCMD is limited to one command at a time, but discards require 3 commands. That makes issuing discards through CQE very awkward, but some CQE's don't support DCMD anyway. So for discards, the existing non-CQE approach is taken, where the mmc core code issues the 3 commands one at a time i.e. mmc_erase(). Where DCMD is used, is for issuing flushes. 
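The DCMD case mentioned above amounts to a single CMD6 (SWITCH) that sets EXT_CSD_FLUSH_CACHE; condensed from mmc_blk_cqe_issue_flush() in the hunks below (comments are illustrative annotations):

	struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);

	/* flush cache via a direct command on the command queue engine */
	mrq->cmd->opcode = MMC_SWITCH;
	mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
			(EXT_CSD_FLUSH_CACHE << 16) |
			(1 << 8) |
			EXT_CSD_CMD_SET_NORMAL;
	mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;

	return mmc_blk_cqe_start_req(mq->card->host, mrq);
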
Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Signed-off-by: Ulf Hansson Tested-by: Linus Walleij --- drivers/mmc/core/block.c | 150 +++++++++++++++++++++++++++++++++++- drivers/mmc/core/block.h | 2 + drivers/mmc/core/queue.c | 162 ++++++++++++++++++++++++++++++++++++++- drivers/mmc/core/queue.h | 18 +++++ 4 files changed, 326 insertions(+), 6 deletions(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 7874c3bbf6b5..7275ac5d6799 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -112,6 +112,7 @@ struct mmc_blk_data { #define MMC_BLK_WRITE BIT(1) #define MMC_BLK_DISCARD BIT(2) #define MMC_BLK_SECDISCARD BIT(3) +#define MMC_BLK_CQE_RECOVERY BIT(4) /* * Only set in main mmc_blk_data associated @@ -1730,6 +1731,138 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, *do_data_tag_p = do_data_tag; } +#define MMC_CQE_RETRIES 2 + +static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req) +{ + struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); + struct mmc_request *mrq = &mqrq->brq.mrq; + struct request_queue *q = req->q; + struct mmc_host *host = mq->card->host; + unsigned long flags; + bool put_card; + int err; + + mmc_cqe_post_req(host, mrq); + + if (mrq->cmd && mrq->cmd->error) + err = mrq->cmd->error; + else if (mrq->data && mrq->data->error) + err = mrq->data->error; + else + err = 0; + + if (err) { + if (mqrq->retries++ < MMC_CQE_RETRIES) + blk_mq_requeue_request(req, true); + else + blk_mq_end_request(req, BLK_STS_IOERR); + } else if (mrq->data) { + if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered)) + blk_mq_requeue_request(req, true); + else + __blk_mq_end_request(req, BLK_STS_OK); + } else { + blk_mq_end_request(req, BLK_STS_OK); + } + + spin_lock_irqsave(q->queue_lock, flags); + + mq->in_flight[mmc_issue_type(mq, req)] -= 1; + + put_card = (mmc_tot_in_flight(mq) == 0); + + mmc_cqe_check_busy(mq); + + spin_unlock_irqrestore(q->queue_lock, flags); + + if (!mq->cqe_busy) + blk_mq_run_hw_queues(q, true); + + if (put_card) + mmc_put_card(mq->card, &mq->ctx); +} + +void mmc_blk_cqe_recovery(struct mmc_queue *mq) +{ + struct mmc_card *card = mq->card; + struct mmc_host *host = card->host; + int err; + + pr_debug("%s: CQE recovery start\n", mmc_hostname(host)); + + err = mmc_cqe_recovery(host); + if (err) + mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY); + else + mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); + + pr_debug("%s: CQE recovery done\n", mmc_hostname(host)); +} + +static void mmc_blk_cqe_req_done(struct mmc_request *mrq) +{ + struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req, + brq.mrq); + struct request *req = mmc_queue_req_to_req(mqrq); + struct request_queue *q = req->q; + struct mmc_queue *mq = q->queuedata; + + /* + * Block layer timeouts race with completions which means the normal + * completion path cannot be used during recovery. 
+ */ + if (mq->in_recovery) + mmc_blk_cqe_complete_rq(mq, req); + else + blk_mq_complete_request(req); +} + +static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq) +{ + mrq->done = mmc_blk_cqe_req_done; + mrq->recovery_notifier = mmc_cqe_recovery_notifier; + + return mmc_cqe_start_req(host, mrq); +} + +static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq, + struct request *req) +{ + struct mmc_blk_request *brq = &mqrq->brq; + + memset(brq, 0, sizeof(*brq)); + + brq->mrq.cmd = &brq->cmd; + brq->mrq.tag = req->tag; + + return &brq->mrq; +} + +static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req) +{ + struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); + struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req); + + mrq->cmd->opcode = MMC_SWITCH; + mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | + (EXT_CSD_FLUSH_CACHE << 16) | + (1 << 8) | + EXT_CSD_CMD_SET_NORMAL; + mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B; + + return mmc_blk_cqe_start_req(mq->card->host, mrq); +} + +static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req) +{ + struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); + + mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL); + + return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq); +} + static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, struct mmc_card *card, int disable_multi, @@ -2038,7 +2171,10 @@ void mmc_blk_mq_complete(struct request *req) { struct mmc_queue *mq = req->q->queuedata; - mmc_blk_mq_complete_rq(mq, req); + if (mq->use_cqe) + mmc_blk_cqe_complete_rq(mq, req); + else + mmc_blk_mq_complete_rq(mq, req); } static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, @@ -2212,6 +2348,9 @@ out_post_req: static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host) { + if (mq->use_cqe) + return host->cqe_ops->cqe_wait_for_idle(host); + return mmc_blk_rw_wait(mq, NULL); } @@ -2250,11 +2389,18 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req) return MMC_REQ_FAILED_TO_START; } return MMC_REQ_FINISHED; + case MMC_ISSUE_DCMD: case MMC_ISSUE_ASYNC: switch (req_op(req)) { + case REQ_OP_FLUSH: + ret = mmc_blk_cqe_issue_flush(mq, req); + break; case REQ_OP_READ: case REQ_OP_WRITE: - ret = mmc_blk_mq_issue_rw_rq(mq, req); + if (mq->use_cqe) + ret = mmc_blk_cqe_issue_rw_rq(mq, req); + else + ret = mmc_blk_mq_issue_rw_rq(mq, req); break; default: WARN_ON_ONCE(1); diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h index 6d34e87b18f6..f472ce5d5647 100644 --- a/drivers/mmc/core/block.h +++ b/drivers/mmc/core/block.h @@ -7,6 +7,8 @@ struct request; void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req); +void mmc_blk_cqe_recovery(struct mmc_queue *mq); + enum mmc_issued; enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req); diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 54bec4c6c9bd..8d632d2f5199 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -40,18 +40,142 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) return BLKPREP_OK; } +static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq) +{ + /* Allow only 1 DCMD at a time */ + return mq->in_flight[MMC_ISSUE_DCMD]; +} + +void mmc_cqe_check_busy(struct mmc_queue *mq) +{ + if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq)) + mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY; + + mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL; +} + +static inline bool 
mmc_cqe_can_dcmd(struct mmc_host *host) +{ + return host->caps2 & MMC_CAP2_CQE_DCMD; +} + +enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host, + struct request *req) +{ + switch (req_op(req)) { + case REQ_OP_DRV_IN: + case REQ_OP_DRV_OUT: + case REQ_OP_DISCARD: + case REQ_OP_SECURE_ERASE: + return MMC_ISSUE_SYNC; + case REQ_OP_FLUSH: + return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC; + default: + return MMC_ISSUE_ASYNC; + } +} + enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req) { + struct mmc_host *host = mq->card->host; + + if (mq->use_cqe) + return mmc_cqe_issue_type(host, req); + if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE) return MMC_ISSUE_ASYNC; return MMC_ISSUE_SYNC; } +static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq) +{ + if (!mq->recovery_needed) { + mq->recovery_needed = true; + schedule_work(&mq->recovery_work); + } +} + +void mmc_cqe_recovery_notifier(struct mmc_request *mrq) +{ + struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req, + brq.mrq); + struct request *req = mmc_queue_req_to_req(mqrq); + struct request_queue *q = req->q; + struct mmc_queue *mq = q->queuedata; + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + __mmc_cqe_recovery_notifier(mq); + spin_unlock_irqrestore(q->queue_lock, flags); +} + +static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req) +{ + struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); + struct mmc_request *mrq = &mqrq->brq.mrq; + struct mmc_queue *mq = req->q->queuedata; + struct mmc_host *host = mq->card->host; + enum mmc_issue_type issue_type = mmc_issue_type(mq, req); + bool recovery_needed = false; + + switch (issue_type) { + case MMC_ISSUE_ASYNC: + case MMC_ISSUE_DCMD: + if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) { + if (recovery_needed) + __mmc_cqe_recovery_notifier(mq); + return BLK_EH_RESET_TIMER; + } + /* No timeout */ + return BLK_EH_HANDLED; + default: + /* Timeout is handled by mmc core */ + return BLK_EH_RESET_TIMER; + } +} + static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req, bool reserved) { - return BLK_EH_RESET_TIMER; + struct request_queue *q = req->q; + struct mmc_queue *mq = q->queuedata; + unsigned long flags; + int ret; + + spin_lock_irqsave(q->queue_lock, flags); + + if (mq->recovery_needed || !mq->use_cqe) + ret = BLK_EH_RESET_TIMER; + else + ret = mmc_cqe_timed_out(req); + + spin_unlock_irqrestore(q->queue_lock, flags); + + return ret; +} + +static void mmc_mq_recovery_handler(struct work_struct *work) +{ + struct mmc_queue *mq = container_of(work, struct mmc_queue, + recovery_work); + struct request_queue *q = mq->queue; + + mmc_get_card(mq->card, &mq->ctx); + + mq->in_recovery = true; + + mmc_blk_cqe_recovery(mq); + + mq->in_recovery = false; + + spin_lock_irq(q->queue_lock); + mq->recovery_needed = false; + spin_unlock_irq(q->queue_lock); + + mmc_put_card(mq->card, &mq->ctx); + + blk_mq_run_hw_queues(q, true); } static int mmc_queue_thread(void *d) @@ -223,9 +347,10 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, struct request_queue *q = req->q; struct mmc_queue *mq = q->queuedata; struct mmc_card *card = mq->card; + struct mmc_host *host = card->host; enum mmc_issue_type issue_type; enum mmc_issued issued; - bool get_card; + bool get_card, cqe_retune_ok; int ret; if (mmc_card_removed(mq->card)) { @@ -237,7 +362,19 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, spin_lock_irq(q->queue_lock); + if 
(mq->recovery_needed) { + spin_unlock_irq(q->queue_lock); + return BLK_STS_RESOURCE; + } + switch (issue_type) { + case MMC_ISSUE_DCMD: + if (mmc_cqe_dcmd_busy(mq)) { + mq->cqe_busy |= MMC_CQE_DCMD_BUSY; + spin_unlock_irq(q->queue_lock); + return BLK_STS_RESOURCE; + } + break; case MMC_ISSUE_ASYNC: break; default: @@ -254,6 +391,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, mq->in_flight[issue_type] += 1; get_card = (mmc_tot_in_flight(mq) == 1); + cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); spin_unlock_irq(q->queue_lock); @@ -265,6 +403,11 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, if (get_card) mmc_get_card(card, &mq->ctx); + if (mq->use_cqe) { + host->retune_now = host->need_retune && cqe_retune_ok && + !host->hold_retune; + } + blk_mq_start_request(req); issued = mmc_blk_mq_issue_rq(mq, req); @@ -326,6 +469,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) /* Initialize thread_sem even if it is not used */ sema_init(&mq->thread_sem, 1); + INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler); INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work); mutex_init(&mq->complete_lock); @@ -375,10 +519,18 @@ free_tag_set: static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock) { + struct mmc_host *host = card->host; int q_depth; int ret; - q_depth = MMC_QUEUE_DEPTH; + /* + * The queue depth for CQE must match the hardware because the request + * tag is used to index the hardware queue. + */ + if (mq->use_cqe) + q_depth = min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth); + else + q_depth = MMC_QUEUE_DEPTH; ret = mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock); if (ret) @@ -408,7 +560,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, mq->card = card; - if (mmc_host_use_blk_mq(host)) + mq->use_cqe = host->cqe_enabled; + + if (mq->use_cqe || mmc_host_use_blk_mq(host)) return mmc_mq_init(mq, card, lock); mq->queue = blk_alloc_queue(GFP_KERNEL); diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index ce9249852f26..1d7d3b0afff8 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h @@ -17,6 +17,7 @@ enum mmc_issued { enum mmc_issue_type { MMC_ISSUE_SYNC, + MMC_ISSUE_DCMD, MMC_ISSUE_ASYNC, MMC_ISSUE_MAX, }; @@ -92,8 +93,15 @@ struct mmc_queue { int qcnt; int in_flight[MMC_ISSUE_MAX]; + unsigned int cqe_busy; +#define MMC_CQE_DCMD_BUSY BIT(0) +#define MMC_CQE_QUEUE_FULL BIT(1) + bool use_cqe; + bool recovery_needed; + bool in_recovery; bool rw_wait; bool waiting; + struct work_struct recovery_work; wait_queue_head_t wait; struct request *complete_req; struct mutex complete_lock; @@ -108,11 +116,21 @@ extern void mmc_queue_resume(struct mmc_queue *); extern unsigned int mmc_queue_map_sg(struct mmc_queue *, struct mmc_queue_req *); +void mmc_cqe_check_busy(struct mmc_queue *mq); +void mmc_cqe_recovery_notifier(struct mmc_request *mrq); + enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req); static inline int mmc_tot_in_flight(struct mmc_queue *mq) { return mq->in_flight[MMC_ISSUE_SYNC] + + mq->in_flight[MMC_ISSUE_DCMD] + + mq->in_flight[MMC_ISSUE_ASYNC]; +} + +static inline int mmc_cqe_qcnt(struct mmc_queue *mq) +{ + return mq->in_flight[MMC_ISSUE_DCMD] + mq->in_flight[MMC_ISSUE_ASYNC]; } From a4080225f51dcea129d26185a35acfbb3770a32d Mon Sep 17 00:00:00 2001 From: Venkat Gopalakrishnan Date: Wed, 29 Nov 2017 15:41:05 +0200 Subject: [PATCH 14/96] mmc: cqhci: support for command queue enabled host This patch adds CMDQ support 
for command-queue compatible hosts. Command queue was added in the eMMC 5.1 specification. This enables the controller to process up to 32 requests at a time. Adrian Hunter contributed the renaming to cqhci, as well as recovery, suspend and resume, cqhci_off, cqhci_wait_for_idle, and external timeout handling. Signed-off-by: Asutosh Das Signed-off-by: Sujit Reddy Thumma Signed-off-by: Konstantin Dorfman Signed-off-by: Venkat Gopalakrishnan Signed-off-by: Subhash Jadavani Signed-off-by: Ritesh Harjani Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Signed-off-by: Ulf Hansson Tested-by: Linus Walleij --- drivers/mmc/host/Kconfig | 13 + drivers/mmc/host/Makefile | 1 + drivers/mmc/host/cqhci.c | 1150 +++++++++++++++++++++++++++++++++++++ drivers/mmc/host/cqhci.h | 240 ++++++++ 4 files changed, 1404 insertions(+) create mode 100644 drivers/mmc/host/cqhci.c create mode 100644 drivers/mmc/host/cqhci.h diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 567028c9219a..3092b7085cb5 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -857,6 +857,19 @@ config MMC_SUNXI This selects support for the SD/MMC Host Controller on Allwinner sunxi SoCs. +config MMC_CQHCI + tristate "Command Queue Host Controller Interface support" + depends on HAS_DMA + help + This selects the Command Queue Host Controller Interface (CQHCI) + support present in host controllers of Qualcomm Technologies, Inc + amongst others. + This controller supports eMMC devices with command queue support. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + config MMC_TOSHIBA_PCI tristate "Toshiba Type A SD/MMC Card Interface Driver" depends on PCI diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index a43cf0d5a5d3..407a011026cd 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -92,6 +92,7 @@ obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o +obj-$(CONFIG_MMC_CQHCI) += cqhci.o ifeq ($(CONFIG_CB710_DEBUG),y) CFLAGS-cb710-mmc += -DDEBUG diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c new file mode 100644 index 000000000000..159270e947cf --- /dev/null +++ b/drivers/mmc/host/cqhci.c @@ -0,0 +1,1150 @@ +/* Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "cqhci.h" + +#define DCMD_SLOT 31 +#define NUM_SLOTS 32 + +struct cqhci_slot { + struct mmc_request *mrq; + unsigned int flags; +#define CQHCI_EXTERNAL_TIMEOUT BIT(0) +#define CQHCI_COMPLETED BIT(1) +#define CQHCI_HOST_CRC BIT(2) +#define CQHCI_HOST_TIMEOUT BIT(3) +#define CQHCI_HOST_OTHER BIT(4) +}; + +static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag) +{ + return cq_host->desc_base + (tag * cq_host->slot_sz); +} + +static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag) +{ + u8 *desc = get_desc(cq_host, tag); + + return desc + cq_host->task_desc_len; +} + +static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag) +{ + return cq_host->trans_desc_dma_base + + (cq_host->mmc->max_segs * tag * + cq_host->trans_desc_len); +} + +static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag) +{ + return cq_host->trans_desc_base + + (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag); +} + +static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag) +{ + u8 *link_temp; + dma_addr_t trans_temp; + + link_temp = get_link_desc(cq_host, tag); + trans_temp = get_trans_desc_dma(cq_host, tag); + + memset(link_temp, 0, cq_host->link_desc_len); + if (cq_host->link_desc_len > 8) + *(link_temp + 8) = 0; + + if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) { + *link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1); + return; + } + + *link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0); + + if (cq_host->dma64) { + __le64 *data_addr = (__le64 __force *)(link_temp + 4); + + data_addr[0] = cpu_to_le64(trans_temp); + } else { + __le32 *data_addr = (__le32 __force *)(link_temp + 4); + + data_addr[0] = cpu_to_le32(trans_temp); + } +} + +static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set) +{ + cqhci_writel(cq_host, set, CQHCI_ISTE); + cqhci_writel(cq_host, set, CQHCI_ISGE); +} + +#define DRV_NAME "cqhci" + +#define CQHCI_DUMP(f, x...) 
\ + pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x) + +static void cqhci_dumpregs(struct cqhci_host *cq_host) +{ + struct mmc_host *mmc = cq_host->mmc; + + CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n"); + + CQHCI_DUMP("Caps: 0x%08x | Version: 0x%08x\n", + cqhci_readl(cq_host, CQHCI_CAP), + cqhci_readl(cq_host, CQHCI_VER)); + CQHCI_DUMP("Config: 0x%08x | Control: 0x%08x\n", + cqhci_readl(cq_host, CQHCI_CFG), + cqhci_readl(cq_host, CQHCI_CTL)); + CQHCI_DUMP("Int stat: 0x%08x | Int enab: 0x%08x\n", + cqhci_readl(cq_host, CQHCI_IS), + cqhci_readl(cq_host, CQHCI_ISTE)); + CQHCI_DUMP("Int sig: 0x%08x | Int Coal: 0x%08x\n", + cqhci_readl(cq_host, CQHCI_ISGE), + cqhci_readl(cq_host, CQHCI_IC)); + CQHCI_DUMP("TDL base: 0x%08x | TDL up32: 0x%08x\n", + cqhci_readl(cq_host, CQHCI_TDLBA), + cqhci_readl(cq_host, CQHCI_TDLBAU)); + CQHCI_DUMP("Doorbell: 0x%08x | TCN: 0x%08x\n", + cqhci_readl(cq_host, CQHCI_TDBR), + cqhci_readl(cq_host, CQHCI_TCN)); + CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n", + cqhci_readl(cq_host, CQHCI_DQS), + cqhci_readl(cq_host, CQHCI_DPT)); + CQHCI_DUMP("Task clr: 0x%08x | SSC1: 0x%08x\n", + cqhci_readl(cq_host, CQHCI_TCLR), + cqhci_readl(cq_host, CQHCI_SSC1)); + CQHCI_DUMP("SSC2: 0x%08x | DCMD rsp: 0x%08x\n", + cqhci_readl(cq_host, CQHCI_SSC2), + cqhci_readl(cq_host, CQHCI_CRDCT)); + CQHCI_DUMP("RED mask: 0x%08x | TERRI: 0x%08x\n", + cqhci_readl(cq_host, CQHCI_RMEM), + cqhci_readl(cq_host, CQHCI_TERRI)); + CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n", + cqhci_readl(cq_host, CQHCI_CRI), + cqhci_readl(cq_host, CQHCI_CRA)); + + if (cq_host->ops->dumpregs) + cq_host->ops->dumpregs(mmc); + else + CQHCI_DUMP(": ===========================================\n"); +} + +/** + * The allocated descriptor table for task, link & transfer descritors + * looks like: + * |----------| + * |task desc | |->|----------| + * |----------| | |trans desc| + * |link desc-|->| |----------| + * |----------| . + * . . + * no. of slots max-segs + * . |----------| + * |----------| + * The idea here is to create the [task+trans] table and mark & point the + * link desc to the transfer desc table on a per slot basis. 
+ */ +static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host) +{ + int i = 0; + + /* task descriptor can be 64/128 bit irrespective of arch */ + if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) { + cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) | + CQHCI_TASK_DESC_SZ, CQHCI_CFG); + cq_host->task_desc_len = 16; + } else { + cq_host->task_desc_len = 8; + } + + /* + * 96 bits length of transfer desc instead of 128 bits which means + * ADMA would expect next valid descriptor at the 96th bit + * or 128th bit + */ + if (cq_host->dma64) { + if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ) + cq_host->trans_desc_len = 12; + else + cq_host->trans_desc_len = 16; + cq_host->link_desc_len = 16; + } else { + cq_host->trans_desc_len = 8; + cq_host->link_desc_len = 8; + } + + /* total size of a slot: 1 task & 1 transfer (link) */ + cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len; + + cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots; + + cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs * + (cq_host->num_slots - 1); + + pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n", + mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size, + cq_host->slot_sz); + + /* + * allocate a dma-mapped chunk of memory for the descriptors + * allocate a dma-mapped chunk of memory for link descriptors + * setup each link-desc memory offset per slot-number to + * the descriptor table. + */ + cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc), + cq_host->desc_size, + &cq_host->desc_dma_base, + GFP_KERNEL); + cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc), + cq_host->data_size, + &cq_host->trans_desc_dma_base, + GFP_KERNEL); + if (!cq_host->desc_base || !cq_host->trans_desc_base) + return -ENOMEM; + + pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n", + mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base, + (unsigned long long)cq_host->desc_dma_base, + (unsigned long long)cq_host->trans_desc_dma_base); + + for (; i < (cq_host->num_slots); i++) + setup_trans_desc(cq_host, i); + + return 0; +} + +static void __cqhci_enable(struct cqhci_host *cq_host) +{ + struct mmc_host *mmc = cq_host->mmc; + u32 cqcfg; + + cqcfg = cqhci_readl(cq_host, CQHCI_CFG); + + /* Configuration must not be changed while enabled */ + if (cqcfg & CQHCI_ENABLE) { + cqcfg &= ~CQHCI_ENABLE; + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); + } + + cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ); + + if (mmc->caps2 & MMC_CAP2_CQE_DCMD) + cqcfg |= CQHCI_DCMD; + + if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) + cqcfg |= CQHCI_TASK_DESC_SZ; + + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); + + cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base), + CQHCI_TDLBA); + cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base), + CQHCI_TDLBAU); + + cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2); + + cqhci_set_irqs(cq_host, 0); + + cqcfg |= CQHCI_ENABLE; + + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); + + mmc->cqe_on = true; + + if (cq_host->ops->enable) + cq_host->ops->enable(mmc); + + /* Ensure all writes are done before interrupts are enabled */ + wmb(); + + cqhci_set_irqs(cq_host, CQHCI_IS_MASK); + + cq_host->activated = true; +} + +static void __cqhci_disable(struct cqhci_host *cq_host) +{ + u32 cqcfg; + + cqcfg = cqhci_readl(cq_host, CQHCI_CFG); + cqcfg &= ~CQHCI_ENABLE; + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); + + cq_host->mmc->cqe_on = false; + + cq_host->activated = false; +} + +int 
cqhci_suspend(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + + if (cq_host->enabled) + __cqhci_disable(cq_host); + + return 0; +} +EXPORT_SYMBOL(cqhci_suspend); + +int cqhci_resume(struct mmc_host *mmc) +{ + /* Re-enable is done upon first request */ + return 0; +} +EXPORT_SYMBOL(cqhci_resume); + +static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + int err; + + if (cq_host->enabled) + return 0; + + cq_host->rca = card->rca; + + err = cqhci_host_alloc_tdl(cq_host); + if (err) + return err; + + __cqhci_enable(cq_host); + + cq_host->enabled = true; + +#ifdef DEBUG + cqhci_dumpregs(cq_host); +#endif + return 0; +} + +/* CQHCI is idle and should halt immediately, so set a small timeout */ +#define CQHCI_OFF_TIMEOUT 100 + +static void cqhci_off(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + ktime_t timeout; + bool timed_out; + u32 reg; + + if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt) + return; + + if (cq_host->ops->disable) + cq_host->ops->disable(mmc, false); + + cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL); + + timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT); + while (1) { + timed_out = ktime_compare(ktime_get(), timeout) > 0; + reg = cqhci_readl(cq_host, CQHCI_CTL); + if ((reg & CQHCI_HALT) || timed_out) + break; + } + + if (timed_out) + pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc)); + else + pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc)); + + mmc->cqe_on = false; +} + +static void cqhci_disable(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + + if (!cq_host->enabled) + return; + + cqhci_off(mmc); + + __cqhci_disable(cq_host); + + dmam_free_coherent(mmc_dev(mmc), cq_host->data_size, + cq_host->trans_desc_base, + cq_host->trans_desc_dma_base); + + dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size, + cq_host->desc_base, + cq_host->desc_dma_base); + + cq_host->trans_desc_base = NULL; + cq_host->desc_base = NULL; + + cq_host->enabled = false; +} + +static void cqhci_prep_task_desc(struct mmc_request *mrq, + u64 *data, bool intr) +{ + u32 req_flags = mrq->data->flags; + + *data = CQHCI_VALID(1) | + CQHCI_END(1) | + CQHCI_INT(intr) | + CQHCI_ACT(0x5) | + CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) | + CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) | + CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) | + CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) | + CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) | + CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) | + CQHCI_BLK_COUNT(mrq->data->blocks) | + CQHCI_BLK_ADDR((u64)mrq->data->blk_addr); + + pr_debug("%s: cqhci: tag %d task descriptor 0x016%llx\n", + mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data); +} + +static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq) +{ + int sg_count; + struct mmc_data *data = mrq->data; + + if (!data) + return -EINVAL; + + sg_count = dma_map_sg(mmc_dev(host), data->sg, + data->sg_len, + (data->flags & MMC_DATA_WRITE) ? + DMA_TO_DEVICE : DMA_FROM_DEVICE); + if (!sg_count) { + pr_err("%s: sg-len: %d\n", __func__, data->sg_len); + return -ENOMEM; + } + + return sg_count; +} + +static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end, + bool dma64) +{ + __le32 *attr = (__le32 __force *)desc; + + *attr = (CQHCI_VALID(1) | + CQHCI_END(end ? 
1 : 0) | + CQHCI_INT(0) | + CQHCI_ACT(0x4) | + CQHCI_DAT_LENGTH(len)); + + if (dma64) { + __le64 *dataddr = (__le64 __force *)(desc + 4); + + dataddr[0] = cpu_to_le64(addr); + } else { + __le32 *dataddr = (__le32 __force *)(desc + 4); + + dataddr[0] = cpu_to_le32(addr); + } +} + +static int cqhci_prep_tran_desc(struct mmc_request *mrq, + struct cqhci_host *cq_host, int tag) +{ + struct mmc_data *data = mrq->data; + int i, sg_count, len; + bool end = false; + bool dma64 = cq_host->dma64; + dma_addr_t addr; + u8 *desc; + struct scatterlist *sg; + + sg_count = cqhci_dma_map(mrq->host, mrq); + if (sg_count < 0) { + pr_err("%s: %s: unable to map sg lists, %d\n", + mmc_hostname(mrq->host), __func__, sg_count); + return sg_count; + } + + desc = get_trans_desc(cq_host, tag); + + for_each_sg(data->sg, sg, sg_count, i) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + + if ((i+1) == sg_count) + end = true; + cqhci_set_tran_desc(desc, addr, len, end, dma64); + desc += cq_host->trans_desc_len; + } + + return 0; +} + +static void cqhci_prep_dcmd_desc(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + u64 *task_desc = NULL; + u64 data = 0; + u8 resp_type; + u8 *desc; + __le64 *dataddr; + struct cqhci_host *cq_host = mmc->cqe_private; + u8 timing; + + if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) { + resp_type = 0x0; + timing = 0x1; + } else { + if (mrq->cmd->flags & MMC_RSP_R1B) { + resp_type = 0x3; + timing = 0x0; + } else { + resp_type = 0x2; + timing = 0x1; + } + } + + task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot); + memset(task_desc, 0, cq_host->task_desc_len); + data |= (CQHCI_VALID(1) | + CQHCI_END(1) | + CQHCI_INT(1) | + CQHCI_QBAR(1) | + CQHCI_ACT(0x5) | + CQHCI_CMD_INDEX(mrq->cmd->opcode) | + CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type)); + *task_desc |= data; + desc = (u8 *)task_desc; + pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n", + mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type); + dataddr = (__le64 __force *)(desc + 4); + dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg); + +} + +static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + if (data) { + dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len, + (data->flags & MMC_DATA_READ) ? + DMA_FROM_DEVICE : DMA_TO_DEVICE); + } +} + +static inline int cqhci_tag(struct mmc_request *mrq) +{ + return mrq->cmd ? 
DCMD_SLOT : mrq->tag; +} + +static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + int err = 0; + u64 data = 0; + u64 *task_desc = NULL; + int tag = cqhci_tag(mrq); + struct cqhci_host *cq_host = mmc->cqe_private; + unsigned long flags; + + if (!cq_host->enabled) { + pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc)); + return -EINVAL; + } + + /* First request after resume has to re-enable */ + if (!cq_host->activated) + __cqhci_enable(cq_host); + + if (!mmc->cqe_on) { + cqhci_writel(cq_host, 0, CQHCI_CTL); + mmc->cqe_on = true; + pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc)); + if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) { + pr_err("%s: cqhci: CQE failed to exit halt state\n", + mmc_hostname(mmc)); + } + if (cq_host->ops->enable) + cq_host->ops->enable(mmc); + } + + if (mrq->data) { + task_desc = (__le64 __force *)get_desc(cq_host, tag); + cqhci_prep_task_desc(mrq, &data, 1); + *task_desc = cpu_to_le64(data); + err = cqhci_prep_tran_desc(mrq, cq_host, tag); + if (err) { + pr_err("%s: cqhci: failed to setup tx desc: %d\n", + mmc_hostname(mmc), err); + return err; + } + } else { + cqhci_prep_dcmd_desc(mmc, mrq); + } + + spin_lock_irqsave(&cq_host->lock, flags); + + if (cq_host->recovery_halt) { + err = -EBUSY; + goto out_unlock; + } + + cq_host->slot[tag].mrq = mrq; + cq_host->slot[tag].flags = 0; + + cq_host->qcnt += 1; + + cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR); + if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag))) + pr_debug("%s: cqhci: doorbell not set for tag %d\n", + mmc_hostname(mmc), tag); +out_unlock: + spin_unlock_irqrestore(&cq_host->lock, flags); + + if (err) + cqhci_post_req(mmc, mrq); + + return err; +} + +static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq, + bool notify) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + + if (!cq_host->recovery_halt) { + cq_host->recovery_halt = true; + pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc)); + wake_up(&cq_host->wait_queue); + if (notify && mrq->recovery_notifier) + mrq->recovery_notifier(mrq); + } +} + +static unsigned int cqhci_error_flags(int error1, int error2) +{ + int error = error1 ? error1 : error2; + + switch (error) { + case -EILSEQ: + return CQHCI_HOST_CRC; + case -ETIMEDOUT: + return CQHCI_HOST_TIMEOUT; + default: + return CQHCI_HOST_OTHER; + } +} + +static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error, + int data_error) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + struct cqhci_slot *slot; + u32 terri; + int tag; + + spin_lock(&cq_host->lock); + + terri = cqhci_readl(cq_host, CQHCI_TERRI); + + pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n", + mmc_hostname(mmc), status, cmd_error, data_error, terri); + + /* Forget about errors when recovery has already been triggered */ + if (cq_host->recovery_halt) + goto out_unlock; + + if (!cq_host->qcnt) { + WARN_ONCE(1, "%s: cqhci: error when idle. 
IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n", + mmc_hostname(mmc), status, cmd_error, data_error, + terri); + goto out_unlock; + } + + if (CQHCI_TERRI_C_VALID(terri)) { + tag = CQHCI_TERRI_C_TASK(terri); + slot = &cq_host->slot[tag]; + if (slot->mrq) { + slot->flags = cqhci_error_flags(cmd_error, data_error); + cqhci_recovery_needed(mmc, slot->mrq, true); + } + } + + if (CQHCI_TERRI_D_VALID(terri)) { + tag = CQHCI_TERRI_D_TASK(terri); + slot = &cq_host->slot[tag]; + if (slot->mrq) { + slot->flags = cqhci_error_flags(data_error, cmd_error); + cqhci_recovery_needed(mmc, slot->mrq, true); + } + } + + if (!cq_host->recovery_halt) { + /* + * The only way to guarantee forward progress is to mark at + * least one task in error, so if none is indicated, pick one. + */ + for (tag = 0; tag < NUM_SLOTS; tag++) { + slot = &cq_host->slot[tag]; + if (!slot->mrq) + continue; + slot->flags = cqhci_error_flags(data_error, cmd_error); + cqhci_recovery_needed(mmc, slot->mrq, true); + break; + } + } + +out_unlock: + spin_unlock(&cq_host->lock); +} + +static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + struct cqhci_slot *slot = &cq_host->slot[tag]; + struct mmc_request *mrq = slot->mrq; + struct mmc_data *data; + + if (!mrq) { + WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n", + mmc_hostname(mmc), tag); + return; + } + + /* No completions allowed during recovery */ + if (cq_host->recovery_halt) { + slot->flags |= CQHCI_COMPLETED; + return; + } + + slot->mrq = NULL; + + cq_host->qcnt -= 1; + + data = mrq->data; + if (data) { + if (data->error) + data->bytes_xfered = 0; + else + data->bytes_xfered = data->blksz * data->blocks; + } + + mmc_cqe_request_done(mmc, mrq); +} + +irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error, + int data_error) +{ + u32 status; + unsigned long tag = 0, comp_status; + struct cqhci_host *cq_host = mmc->cqe_private; + + status = cqhci_readl(cq_host, CQHCI_IS); + cqhci_writel(cq_host, status, CQHCI_IS); + + pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status); + + if ((status & CQHCI_IS_RED) || cmd_error || data_error) + cqhci_error_irq(mmc, status, cmd_error, data_error); + + if (status & CQHCI_IS_TCC) { + /* read TCN and complete the request */ + comp_status = cqhci_readl(cq_host, CQHCI_TCN); + cqhci_writel(cq_host, comp_status, CQHCI_TCN); + pr_debug("%s: cqhci: TCN: 0x%08lx\n", + mmc_hostname(mmc), comp_status); + + spin_lock(&cq_host->lock); + + for_each_set_bit(tag, &comp_status, cq_host->num_slots) { + /* complete the corresponding mrq */ + pr_debug("%s: cqhci: completing tag %lu\n", + mmc_hostname(mmc), tag); + cqhci_finish_mrq(mmc, tag); + } + + if (cq_host->waiting_for_idle && !cq_host->qcnt) { + cq_host->waiting_for_idle = false; + wake_up(&cq_host->wait_queue); + } + + spin_unlock(&cq_host->lock); + } + + if (status & CQHCI_IS_TCL) + wake_up(&cq_host->wait_queue); + + if (status & CQHCI_IS_HAC) + wake_up(&cq_host->wait_queue); + + return IRQ_HANDLED; +} +EXPORT_SYMBOL(cqhci_irq); + +static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret) +{ + unsigned long flags; + bool is_idle; + + spin_lock_irqsave(&cq_host->lock, flags); + is_idle = !cq_host->qcnt || cq_host->recovery_halt; + *ret = cq_host->recovery_halt ? 
-EBUSY : 0; + cq_host->waiting_for_idle = !is_idle; + spin_unlock_irqrestore(&cq_host->lock, flags); + + return is_idle; +} + +static int cqhci_wait_for_idle(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + int ret; + + wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret)); + + return ret; +} + +static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq, + bool *recovery_needed) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + int tag = cqhci_tag(mrq); + struct cqhci_slot *slot = &cq_host->slot[tag]; + unsigned long flags; + bool timed_out; + + spin_lock_irqsave(&cq_host->lock, flags); + timed_out = slot->mrq == mrq; + if (timed_out) { + slot->flags |= CQHCI_EXTERNAL_TIMEOUT; + cqhci_recovery_needed(mmc, mrq, false); + *recovery_needed = cq_host->recovery_halt; + } + spin_unlock_irqrestore(&cq_host->lock, flags); + + if (timed_out) { + pr_err("%s: cqhci: timeout for tag %d\n", + mmc_hostname(mmc), tag); + cqhci_dumpregs(cq_host); + } + + return timed_out; +} + +static bool cqhci_tasks_cleared(struct cqhci_host *cq_host) +{ + return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS); +} + +static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + bool ret; + u32 ctl; + + cqhci_set_irqs(cq_host, CQHCI_IS_TCL); + + ctl = cqhci_readl(cq_host, CQHCI_CTL); + ctl |= CQHCI_CLEAR_ALL_TASKS; + cqhci_writel(cq_host, ctl, CQHCI_CTL); + + wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host), + msecs_to_jiffies(timeout) + 1); + + cqhci_set_irqs(cq_host, 0); + + ret = cqhci_tasks_cleared(cq_host); + + if (!ret) + pr_debug("%s: cqhci: Failed to clear tasks\n", + mmc_hostname(mmc)); + + return ret; +} + +static bool cqhci_halted(struct cqhci_host *cq_host) +{ + return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT; +} + +static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + bool ret; + u32 ctl; + + if (cqhci_halted(cq_host)) + return true; + + cqhci_set_irqs(cq_host, CQHCI_IS_HAC); + + ctl = cqhci_readl(cq_host, CQHCI_CTL); + ctl |= CQHCI_HALT; + cqhci_writel(cq_host, ctl, CQHCI_CTL); + + wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host), + msecs_to_jiffies(timeout) + 1); + + cqhci_set_irqs(cq_host, 0); + + ret = cqhci_halted(cq_host); + + if (!ret) + pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc)); + + return ret; +} + +/* + * After halting we expect to be able to use the command line. We interpret the + * failure to halt to mean the data lines might still be in use (and the upper + * layers will need to send a STOP command), so we set the timeout based on a + * generous command timeout. 
+ */ +#define CQHCI_START_HALT_TIMEOUT 5 + +static void cqhci_recovery_start(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + + pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__); + + WARN_ON(!cq_host->recovery_halt); + + cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT); + + if (cq_host->ops->disable) + cq_host->ops->disable(mmc, true); + + mmc->cqe_on = false; +} + +static int cqhci_error_from_flags(unsigned int flags) +{ + if (!flags) + return 0; + + /* CRC errors might indicate re-tuning so prefer to report that */ + if (flags & CQHCI_HOST_CRC) + return -EILSEQ; + + if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT)) + return -ETIMEDOUT; + + return -EIO; +} + +static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag) +{ + struct cqhci_slot *slot = &cq_host->slot[tag]; + struct mmc_request *mrq = slot->mrq; + struct mmc_data *data; + + if (!mrq) + return; + + slot->mrq = NULL; + + cq_host->qcnt -= 1; + + data = mrq->data; + if (data) { + data->bytes_xfered = 0; + data->error = cqhci_error_from_flags(slot->flags); + } else { + mrq->cmd->error = cqhci_error_from_flags(slot->flags); + } + + mmc_cqe_request_done(cq_host->mmc, mrq); +} + +static void cqhci_recover_mrqs(struct cqhci_host *cq_host) +{ + int i; + + for (i = 0; i < cq_host->num_slots; i++) + cqhci_recover_mrq(cq_host, i); +} + +/* + * By now the command and data lines should be unused so there is no reason for + * CQHCI to take a long time to halt, but if it doesn't halt there could be + * problems clearing tasks, so be generous. + */ +#define CQHCI_FINISH_HALT_TIMEOUT 20 + +/* CQHCI could be expected to clear it's internal state pretty quickly */ +#define CQHCI_CLEAR_TIMEOUT 20 + +static void cqhci_recovery_finish(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + unsigned long flags; + u32 cqcfg; + bool ok; + + pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__); + + WARN_ON(!cq_host->recovery_halt); + + ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT); + + if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT)) + ok = false; + + /* + * The specification contradicts itself, by saying that tasks cannot be + * cleared if CQHCI does not halt, but if CQHCI does not halt, it should + * be disabled/re-enabled, but not to disable before clearing tasks. + * Have a go anyway. 
+ */ + if (!ok) { + pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc)); + cqcfg = cqhci_readl(cq_host, CQHCI_CFG); + cqcfg &= ~CQHCI_ENABLE; + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); + cqcfg |= CQHCI_ENABLE; + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); + /* Be sure that there are no tasks */ + ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT); + if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT)) + ok = false; + WARN_ON(!ok); + } + + cqhci_recover_mrqs(cq_host); + + WARN_ON(cq_host->qcnt); + + spin_lock_irqsave(&cq_host->lock, flags); + cq_host->qcnt = 0; + cq_host->recovery_halt = false; + mmc->cqe_on = false; + spin_unlock_irqrestore(&cq_host->lock, flags); + + /* Ensure all writes are done before interrupts are re-enabled */ + wmb(); + + cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS); + + cqhci_set_irqs(cq_host, CQHCI_IS_MASK); + + pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc)); +} + +static const struct mmc_cqe_ops cqhci_cqe_ops = { + .cqe_enable = cqhci_enable, + .cqe_disable = cqhci_disable, + .cqe_request = cqhci_request, + .cqe_post_req = cqhci_post_req, + .cqe_off = cqhci_off, + .cqe_wait_for_idle = cqhci_wait_for_idle, + .cqe_timeout = cqhci_timeout, + .cqe_recovery_start = cqhci_recovery_start, + .cqe_recovery_finish = cqhci_recovery_finish, +}; + +struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev) +{ + struct cqhci_host *cq_host; + struct resource *cqhci_memres = NULL; + + /* check and setup CMDQ interface */ + cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "cqhci_mem"); + if (!cqhci_memres) { + dev_dbg(&pdev->dev, "CMDQ not supported\n"); + return ERR_PTR(-EINVAL); + } + + cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) + return ERR_PTR(-ENOMEM); + cq_host->mmio = devm_ioremap(&pdev->dev, + cqhci_memres->start, + resource_size(cqhci_memres)); + if (!cq_host->mmio) { + dev_err(&pdev->dev, "failed to remap cqhci regs\n"); + return ERR_PTR(-EBUSY); + } + dev_dbg(&pdev->dev, "CMDQ ioremap: done\n"); + + return cq_host; +} +EXPORT_SYMBOL(cqhci_pltfm_init); + +static unsigned int cqhci_ver_major(struct cqhci_host *cq_host) +{ + return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER)); +} + +static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host) +{ + u32 ver = cqhci_readl(cq_host, CQHCI_VER); + + return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver); +} + +int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, + bool dma64) +{ + int err; + + cq_host->dma64 = dma64; + cq_host->mmc = mmc; + cq_host->mmc->cqe_private = cq_host; + + cq_host->num_slots = NUM_SLOTS; + cq_host->dcmd_slot = DCMD_SLOT; + + mmc->cqe_ops = &cqhci_cqe_ops; + + mmc->cqe_qdepth = NUM_SLOTS; + if (mmc->caps2 & MMC_CAP2_CQE_DCMD) + mmc->cqe_qdepth -= 1; + + cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots, + sizeof(*cq_host->slot), GFP_KERNEL); + if (!cq_host->slot) { + err = -ENOMEM; + goto out_err; + } + + spin_lock_init(&cq_host->lock); + + init_completion(&cq_host->halt_comp); + init_waitqueue_head(&cq_host->wait_queue); + + pr_info("%s: CQHCI version %u.%02u\n", + mmc_hostname(mmc), cqhci_ver_major(cq_host), + cqhci_ver_minor(cq_host)); + + return 0; + +out_err: + pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n", + mmc_hostname(mmc), cqhci_ver_major(cq_host), + cqhci_ver_minor(cq_host), err); + return err; +} +EXPORT_SYMBOL(cqhci_init); + +MODULE_AUTHOR("Venkat Gopalakrishnan "); +MODULE_DESCRIPTION("Command Queue Host Controller Interface driver"); 
+MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h new file mode 100644 index 000000000000..2d39d361b322 --- /dev/null +++ b/drivers/mmc/host/cqhci.h @@ -0,0 +1,240 @@ +/* Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef LINUX_MMC_CQHCI_H +#define LINUX_MMC_CQHCI_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* registers */ +/* version */ +#define CQHCI_VER 0x00 +#define CQHCI_VER_MAJOR(x) (((x) & GENMASK(11, 8)) >> 8) +#define CQHCI_VER_MINOR1(x) (((x) & GENMASK(7, 4)) >> 4) +#define CQHCI_VER_MINOR2(x) ((x) & GENMASK(3, 0)) + +/* capabilities */ +#define CQHCI_CAP 0x04 +/* configuration */ +#define CQHCI_CFG 0x08 +#define CQHCI_DCMD 0x00001000 +#define CQHCI_TASK_DESC_SZ 0x00000100 +#define CQHCI_ENABLE 0x00000001 + +/* control */ +#define CQHCI_CTL 0x0C +#define CQHCI_CLEAR_ALL_TASKS 0x00000100 +#define CQHCI_HALT 0x00000001 + +/* interrupt status */ +#define CQHCI_IS 0x10 +#define CQHCI_IS_HAC BIT(0) +#define CQHCI_IS_TCC BIT(1) +#define CQHCI_IS_RED BIT(2) +#define CQHCI_IS_TCL BIT(3) + +#define CQHCI_IS_MASK (CQHCI_IS_TCC | CQHCI_IS_RED) + +/* interrupt status enable */ +#define CQHCI_ISTE 0x14 + +/* interrupt signal enable */ +#define CQHCI_ISGE 0x18 + +/* interrupt coalescing */ +#define CQHCI_IC 0x1C +#define CQHCI_IC_ENABLE BIT(31) +#define CQHCI_IC_RESET BIT(16) +#define CQHCI_IC_ICCTHWEN BIT(15) +#define CQHCI_IC_ICCTH(x) ((x & 0x1F) << 8) +#define CQHCI_IC_ICTOVALWEN BIT(7) +#define CQHCI_IC_ICTOVAL(x) (x & 0x7F) + +/* task list base address */ +#define CQHCI_TDLBA 0x20 + +/* task list base address upper */ +#define CQHCI_TDLBAU 0x24 + +/* door-bell */ +#define CQHCI_TDBR 0x28 + +/* task completion notification */ +#define CQHCI_TCN 0x2C + +/* device queue status */ +#define CQHCI_DQS 0x30 + +/* device pending tasks */ +#define CQHCI_DPT 0x34 + +/* task clear */ +#define CQHCI_TCLR 0x38 + +/* send status config 1 */ +#define CQHCI_SSC1 0x40 + +/* send status config 2 */ +#define CQHCI_SSC2 0x44 + +/* response for dcmd */ +#define CQHCI_CRDCT 0x48 + +/* response mode error mask */ +#define CQHCI_RMEM 0x50 + +/* task error info */ +#define CQHCI_TERRI 0x54 + +#define CQHCI_TERRI_C_INDEX(x) ((x) & GENMASK(5, 0)) +#define CQHCI_TERRI_C_TASK(x) (((x) & GENMASK(12, 8)) >> 8) +#define CQHCI_TERRI_C_VALID(x) ((x) & BIT(15)) +#define CQHCI_TERRI_D_INDEX(x) (((x) & GENMASK(21, 16)) >> 16) +#define CQHCI_TERRI_D_TASK(x) (((x) & GENMASK(28, 24)) >> 24) +#define CQHCI_TERRI_D_VALID(x) ((x) & BIT(31)) + +/* command response index */ +#define CQHCI_CRI 0x58 + +/* command response argument */ +#define CQHCI_CRA 0x5C + +#define CQHCI_INT_ALL 0xF +#define CQHCI_IC_DEFAULT_ICCTH 31 +#define CQHCI_IC_DEFAULT_ICTOVAL 1 + +/* attribute fields */ +#define CQHCI_VALID(x) ((x & 1) << 0) +#define CQHCI_END(x) ((x & 1) << 1) +#define CQHCI_INT(x) ((x & 1) << 2) +#define CQHCI_ACT(x) ((x & 0x7) << 3) + +/* data command task descriptor fields */ +#define CQHCI_FORCED_PROG(x) ((x & 1) << 6) +#define CQHCI_CONTEXT(x) ((x & 0xF) << 7) +#define 
CQHCI_DATA_TAG(x) ((x & 1) << 11) +#define CQHCI_DATA_DIR(x) ((x & 1) << 12) +#define CQHCI_PRIORITY(x) ((x & 1) << 13) +#define CQHCI_QBAR(x) ((x & 1) << 14) +#define CQHCI_REL_WRITE(x) ((x & 1) << 15) +#define CQHCI_BLK_COUNT(x) ((x & 0xFFFF) << 16) +#define CQHCI_BLK_ADDR(x) ((x & 0xFFFFFFFF) << 32) + +/* direct command task descriptor fields */ +#define CQHCI_CMD_INDEX(x) ((x & 0x3F) << 16) +#define CQHCI_CMD_TIMING(x) ((x & 1) << 22) +#define CQHCI_RESP_TYPE(x) ((x & 0x3) << 23) + +/* transfer descriptor fields */ +#define CQHCI_DAT_LENGTH(x) ((x & 0xFFFF) << 16) +#define CQHCI_DAT_ADDR_LO(x) ((x & 0xFFFFFFFF) << 32) +#define CQHCI_DAT_ADDR_HI(x) ((x & 0xFFFFFFFF) << 0) + +struct cqhci_host_ops; +struct mmc_host; +struct cqhci_slot; + +struct cqhci_host { + const struct cqhci_host_ops *ops; + void __iomem *mmio; + struct mmc_host *mmc; + + spinlock_t lock; + + /* relative card address of device */ + unsigned int rca; + + /* 64 bit DMA */ + bool dma64; + int num_slots; + int qcnt; + + u32 dcmd_slot; + u32 caps; +#define CQHCI_TASK_DESC_SZ_128 0x1 + + u32 quirks; +#define CQHCI_QUIRK_SHORT_TXFR_DESC_SZ 0x1 + + bool enabled; + bool halted; + bool init_done; + bool activated; + bool waiting_for_idle; + bool recovery_halt; + + size_t desc_size; + size_t data_size; + + u8 *desc_base; + + /* total descriptor size */ + u8 slot_sz; + + /* 64/128 bit depends on CQHCI_CFG */ + u8 task_desc_len; + + /* 64 bit on 32-bit arch, 128 bit on 64-bit */ + u8 link_desc_len; + + u8 *trans_desc_base; + /* same length as transfer descriptor */ + u8 trans_desc_len; + + dma_addr_t desc_dma_base; + dma_addr_t trans_desc_dma_base; + + struct completion halt_comp; + wait_queue_head_t wait_queue; + struct cqhci_slot *slot; +}; + +struct cqhci_host_ops { + void (*dumpregs)(struct mmc_host *mmc); + void (*write_l)(struct cqhci_host *host, u32 val, int reg); + u32 (*read_l)(struct cqhci_host *host, int reg); + void (*enable)(struct mmc_host *mmc); + void (*disable)(struct mmc_host *mmc, bool recovery); +}; + +static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg) +{ + if (unlikely(host->ops->write_l)) + host->ops->write_l(host, val, reg); + else + writel_relaxed(val, host->mmio + reg); +} + +static inline u32 cqhci_readl(struct cqhci_host *host, int reg) +{ + if (unlikely(host->ops->read_l)) + return host->ops->read_l(host, reg); + else + return readl_relaxed(host->mmio + reg); +} + +struct platform_device; + +irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error, + int data_error); +int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, bool dma64); +struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev); +int cqhci_suspend(struct mmc_host *mmc); +int cqhci_resume(struct mmc_host *mmc); + +#endif From 8ee82bda230fc972c7ee3bb15ce1260eefb4721c Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:06 +0200 Subject: [PATCH 15/96] mmc: sdhci-pci: Add CQHCI support for Intel GLK Add CQHCI initialization and implement CQHCI operations for Intel GLK. 
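For reference, the glue an SDHCI-based driver needs is fairly small. The sketch below condenses the pattern this patch follows for GLK (the suspend/resume wiring, CQHCI quirks and part of the error handling are trimmed; the full diff follows, and everything shown mirrors code in the patch itself):

/* Route CQE interrupts from the SDHCI ->irq op into CQHCI */
static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
    int cmd_error = 0;
    int data_error = 0;

    if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
        return intmask; /* not a CQE interrupt, let SDHCI handle it */

    cqhci_irq(host->mmc, intmask, cmd_error, data_error);

    return 0;
}

/* Attach a cqhci_host before adding the SDHCI host */
static int glk_emmc_add_host(struct sdhci_pci_slot *slot)
{
    struct sdhci_host *host = slot->host;
    struct cqhci_host *cq_host;
    bool dma64;
    int ret;

    ret = sdhci_setup_host(host);
    if (ret)
        return ret;

    cq_host = devm_kzalloc(&slot->chip->pdev->dev, sizeof(*cq_host),
                           GFP_KERNEL);
    if (!cq_host) {
        ret = -ENOMEM;
        goto cleanup;
    }

    /* On GLK the CQHCI registers live at offset 0x200 from the SDHCI base */
    cq_host->mmio = host->ioaddr + 0x200;
    cq_host->ops = &glk_cqhci_ops;

    dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
    ret = cqhci_init(cq_host, host->mmc, dma64);
    if (ret)
        goto cleanup;

    ret = __sdhci_add_host(host);
    if (ret)
        goto cleanup;

    return 0;

cleanup:
    sdhci_cleanup_host(host);
    return ret;
}

The probe_slot hook additionally sets MMC_CAP2_CQE (and MMC_CAP2_CQE_DCMD where DCMD can be used), which is what makes the core queue code take the CQE issue path.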
Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Signed-off-by: Ulf Hansson Tested-by: Linus Walleij --- drivers/mmc/host/Kconfig | 1 + drivers/mmc/host/sdhci-pci-core.c | 155 +++++++++++++++++++++++++++++- 2 files changed, 155 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 3092b7085cb5..2b02a9788bb6 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -81,6 +81,7 @@ config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER config MMC_SDHCI_PCI tristate "SDHCI support on PCI bus" depends on MMC_SDHCI && PCI + select MMC_CQHCI help This selects the PCI Secure Digital Host Controller Interface. Most controllers found today are PCI devices. diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 3e4f04fd5175..110c634cfb43 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -30,6 +30,8 @@ #include #include +#include "cqhci.h" + #include "sdhci.h" #include "sdhci-pci.h" @@ -116,6 +118,28 @@ int sdhci_pci_resume_host(struct sdhci_pci_chip *chip) return 0; } + +static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip) +{ + int ret; + + ret = cqhci_suspend(chip->slots[0]->host->mmc); + if (ret) + return ret; + + return sdhci_pci_suspend_host(chip); +} + +static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip) +{ + int ret; + + ret = sdhci_pci_resume_host(chip); + if (ret) + return ret; + + return cqhci_resume(chip->slots[0]->host->mmc); +} #endif #ifdef CONFIG_PM @@ -166,8 +190,48 @@ static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip) return 0; } + +static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip) +{ + int ret; + + ret = cqhci_suspend(chip->slots[0]->host->mmc); + if (ret) + return ret; + + return sdhci_pci_runtime_suspend_host(chip); +} + +static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip) +{ + int ret; + + ret = sdhci_pci_runtime_resume_host(chip); + if (ret) + return ret; + + return cqhci_resume(chip->slots[0]->host->mmc); +} #endif +static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + +static void sdhci_pci_dumpregs(struct mmc_host *mmc) +{ + sdhci_dumpregs(mmc_priv(mmc)); +} + /*****************************************************************************\ * * * Hardware specific quirk handling * @@ -583,6 +647,18 @@ static const struct sdhci_ops sdhci_intel_byt_ops = { .voltage_switch = sdhci_intel_voltage_switch, }; +static const struct sdhci_ops sdhci_intel_glk_ops = { + .set_clock = sdhci_set_clock, + .set_power = sdhci_intel_set_power, + .enable_dma = sdhci_pci_enable_dma, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, + .hw_reset = sdhci_pci_hw_reset, + .voltage_switch = sdhci_intel_voltage_switch, + .irq = sdhci_cqhci_irq, +}; + static void byt_read_dsm(struct sdhci_pci_slot *slot) { struct intel_host *intel_host = sdhci_pci_priv(slot); @@ -612,15 +688,83 @@ static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) { int ret = byt_emmc_probe_slot(slot); + slot->host->mmc->caps2 |= MMC_CAP2_CQE; + if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) { slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES, slot->host->mmc_host_ops.hs400_enhanced_strobe = intel_hs400_enhanced_strobe; + slot->host->mmc->caps2 |= 
MMC_CAP2_CQE_DCMD; } return ret; } +static void glk_cqe_enable(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + u32 reg; + + /* + * CQE gets stuck if it sees Buffer Read Enable bit set, which can be + * the case after tuning, so ensure the buffer is drained. + */ + reg = sdhci_readl(host, SDHCI_PRESENT_STATE); + while (reg & SDHCI_DATA_AVAILABLE) { + sdhci_readl(host, SDHCI_BUFFER); + reg = sdhci_readl(host, SDHCI_PRESENT_STATE); + } + + sdhci_cqe_enable(mmc); +} + +static const struct cqhci_host_ops glk_cqhci_ops = { + .enable = glk_cqe_enable, + .disable = sdhci_cqe_disable, + .dumpregs = sdhci_pci_dumpregs, +}; + +static int glk_emmc_add_host(struct sdhci_pci_slot *slot) +{ + struct device *dev = &slot->chip->pdev->dev; + struct sdhci_host *host = slot->host; + struct cqhci_host *cq_host; + bool dma64; + int ret; + + ret = sdhci_setup_host(host); + if (ret) + return ret; + + cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) { + ret = -ENOMEM; + goto cleanup; + } + + cq_host->mmio = host->ioaddr + 0x200; + cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ; + cq_host->ops = &glk_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + if (dma64) + cq_host->caps |= CQHCI_TASK_DESC_SZ_128; + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) + goto cleanup; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + return 0; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + #ifdef CONFIG_ACPI static int ni_set_max_freq(struct sdhci_pci_slot *slot) { @@ -699,11 +843,20 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = { .allow_runtime_pm = true, .probe_slot = glk_emmc_probe_slot, + .add_host = glk_emmc_add_host, +#ifdef CONFIG_PM_SLEEP + .suspend = sdhci_cqhci_suspend, + .resume = sdhci_cqhci_resume, +#endif +#ifdef CONFIG_PM + .runtime_suspend = sdhci_cqhci_runtime_suspend, + .runtime_resume = sdhci_cqhci_runtime_resume, +#endif .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 | SDHCI_QUIRK2_STOP_WITH_TC, - .ops = &sdhci_intel_byt_ops, + .ops = &sdhci_intel_glk_ops, .priv_size = sizeof(struct intel_host), }; From 10f21df4a23540b5da8e88d1030ff8c37818e04f Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:07 +0200 Subject: [PATCH 16/96] mmc: block: blk-mq: Add support for direct completion For blk-mq, add support for completing requests directly in the ->done callback. That means that error handling and urgent background operations must be handled by recovery_work in that case. 
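Concretely, the decision in the ->done callback then looks roughly like the condensed sketch below, taken from the mmc_blk_mq_req_done() changes in the diff that follows (the queue_lock taken around the state updates is omitted here for brevity):

static void mmc_blk_mq_req_done(struct mmc_request *mrq)
{
    struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
                                              brq.mrq);
    struct request *req = mmc_queue_req_to_req(mqrq);
    struct mmc_queue *mq = req->q->queuedata;
    struct mmc_host *host = mq->card->host;

    if (!mmc_host_done_complete(host)) {
        /*
         * Deferred completion: record complete_req and let the waiting
         * dispatcher or complete_work finish the request (elided here).
         */
        return;
    }

    /* Direct completion: errors and urgent BKOPS go via recovery_work */
    if (mmc_blk_rq_error(&mqrq->brq) ||
        mmc_blk_urgent_bkops_needed(mq, mqrq)) {
        mq->recovery_needed = true;
        mq->recovery_req = req;
        wake_up(&mq->wait);
        schedule_work(&mq->recovery_work);
        return;
    }

    /* Clean requests are completed right here, in the ->done context */
    mmc_blk_rw_reset_success(mq, req);
    mq->rw_wait = false;
    wake_up(&mq->wait);
    mmc_blk_mq_post_req(mq, req);
}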
Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Signed-off-by: Ulf Hansson Tested-by: Linus Walleij --- drivers/mmc/core/block.c | 129 +++++++++++++++++++++++++++++++-------- drivers/mmc/core/block.h | 1 + drivers/mmc/core/host.h | 5 ++ drivers/mmc/core/queue.c | 5 +- drivers/mmc/core/queue.h | 1 + include/linux/mmc/host.h | 1 + 6 files changed, 116 insertions(+), 26 deletions(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 7275ac5d6799..a710a6e95307 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2131,6 +2131,22 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req) } } +static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq) +{ + mmc_blk_eval_resp_error(brq); + + return brq->sbc.error || brq->cmd.error || brq->stop.error || + brq->data.error || brq->cmd.resp[0] & CMD_ERRORS; +} + +static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq, + struct request *req) +{ + int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; + + mmc_blk_reset_success(mq->blkdata, type); +} + static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req) { struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); @@ -2213,14 +2229,43 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req) mmc_post_req(host, mrq, 0); - blk_mq_complete_request(req); + /* + * Block layer timeouts race with completions which means the normal + * completion path cannot be used during recovery. + */ + if (mq->in_recovery) + mmc_blk_mq_complete_rq(mq, req); + else + blk_mq_complete_request(req); mmc_blk_mq_dec_in_flight(mq, req); } +void mmc_blk_mq_recovery(struct mmc_queue *mq) +{ + struct request *req = mq->recovery_req; + struct mmc_host *host = mq->card->host; + struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); + + mq->recovery_req = NULL; + mq->rw_wait = false; + + if (mmc_blk_rq_error(&mqrq->brq)) { + mmc_retune_hold_now(host); + mmc_blk_mq_rw_recovery(mq, req); + } + + mmc_blk_urgent_bkops(mq, mqrq); + + mmc_blk_mq_post_req(mq, req); +} + static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq, struct request **prev_req) { + if (mmc_host_done_complete(mq->card->host)) + return; + mutex_lock(&mq->complete_lock); if (!mq->complete_req) @@ -2254,29 +2299,56 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq) struct request *req = mmc_queue_req_to_req(mqrq); struct request_queue *q = req->q; struct mmc_queue *mq = q->queuedata; + struct mmc_host *host = mq->card->host; unsigned long flags; - bool waiting; - /* - * We cannot complete the request in this context, so record that there - * is a request to complete, and that a following request does not need - * to wait (although it does need to complete complete_req first). - */ - spin_lock_irqsave(q->queue_lock, flags); - mq->complete_req = req; - mq->rw_wait = false; - waiting = mq->waiting; - spin_unlock_irqrestore(q->queue_lock, flags); + if (!mmc_host_done_complete(host)) { + bool waiting; - /* - * If 'waiting' then the waiting task will complete this request, - * otherwise queue a work to do it. Note that complete_work may still - * race with the dispatch of a following request. - */ - if (waiting) + /* + * We cannot complete the request in this context, so record + * that there is a request to complete, and that a following + * request does not need to wait (although it does need to + * complete complete_req first). 
+ */ + spin_lock_irqsave(q->queue_lock, flags); + mq->complete_req = req; + mq->rw_wait = false; + waiting = mq->waiting; + spin_unlock_irqrestore(q->queue_lock, flags); + + /* + * If 'waiting' then the waiting task will complete this + * request, otherwise queue a work to do it. Note that + * complete_work may still race with the dispatch of a following + * request. + */ + if (waiting) + wake_up(&mq->wait); + else + kblockd_schedule_work(&mq->complete_work); + + return; + } + + /* Take the recovery path for errors or urgent background operations */ + if (mmc_blk_rq_error(&mqrq->brq) || + mmc_blk_urgent_bkops_needed(mq, mqrq)) { + spin_lock_irqsave(q->queue_lock, flags); + mq->recovery_needed = true; + mq->recovery_req = req; + spin_unlock_irqrestore(q->queue_lock, flags); wake_up(&mq->wait); - else - kblockd_schedule_work(&mq->complete_work); + schedule_work(&mq->recovery_work); + return; + } + + mmc_blk_rw_reset_success(mq, req); + + mq->rw_wait = false; + wake_up(&mq->wait); + + mmc_blk_mq_post_req(mq, req); } static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err) @@ -2286,11 +2358,16 @@ static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err) bool done; /* - * Wait while there is another request in progress. Also indicate that - * there is a request waiting to start. + * Wait while there is another request in progress, but not if recovery + * is needed. Also indicate whether there is a request waiting to start. */ spin_lock_irqsave(q->queue_lock, flags); - done = !mq->rw_wait; + if (mq->recovery_needed) { + *err = -EBUSY; + done = true; + } else { + done = !mq->rw_wait; + } mq->waiting = !done; spin_unlock_irqrestore(q->queue_lock, flags); @@ -2334,10 +2411,12 @@ static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq, if (prev_req) mmc_blk_mq_post_req(mq, prev_req); - if (err) { + if (err) mq->rw_wait = false; + + /* Release re-tuning here where there is no synchronization required */ + if (err || mmc_host_done_complete(host)) mmc_retune_release(host); - } out_post_req: if (err) diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h index f472ce5d5647..b126418fd163 100644 --- a/drivers/mmc/core/block.h +++ b/drivers/mmc/core/block.h @@ -13,6 +13,7 @@ enum mmc_issued; enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req); void mmc_blk_mq_complete(struct request *req); +void mmc_blk_mq_recovery(struct mmc_queue *mq); struct work_struct; diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h index 6eaf558e62d6..8ca284e079e3 100644 --- a/drivers/mmc/core/host.h +++ b/drivers/mmc/core/host.h @@ -41,6 +41,11 @@ static inline int mmc_host_cmd23(struct mmc_host *host) return host->caps & MMC_CAP_CMD23; } +static inline bool mmc_host_done_complete(struct mmc_host *host) +{ + return host->caps & MMC_CAP_DONE_COMPLETE; +} + static inline int mmc_boot_partition_access(struct mmc_host *host) { return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC); diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 8d632d2f5199..d8394007bc99 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -165,7 +165,10 @@ static void mmc_mq_recovery_handler(struct work_struct *work) mq->in_recovery = true; - mmc_blk_cqe_recovery(mq); + if (mq->use_cqe) + mmc_blk_cqe_recovery(mq); + else + mmc_blk_mq_recovery(mq); mq->in_recovery = false; diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index 1d7d3b0afff8..34f601c6dd39 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h @@ -103,6 +103,7 @@ struct 
mmc_queue { bool waiting; struct work_struct recovery_work; wait_queue_head_t wait; + struct request *recovery_req; struct request *complete_req; struct mutex complete_lock; struct work_struct complete_work; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index ce2075d6f429..f3e13c50f6b0 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -324,6 +324,7 @@ struct mmc_host { #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ +#define MMC_CAP_DONE_COMPLETE (1 << 27) /* RW reqs can be completed within mmc_request_done() */ #define MMC_CAP_CD_WAKE (1 << 28) /* Enable card detect wake */ #define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */ #define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */ From 88a516461ee07a994c0e7016faf85f3466de1d09 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:08 +0200 Subject: [PATCH 17/96] mmc: block: blk-mq: Separate card polling from recovery Recovery is simpler to understand if it is only used for errors. Create a separate function for card polling. Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Signed-off-by: Ulf Hansson Tested-by: Linus Walleij --- drivers/mmc/core/block.c | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index a710a6e95307..6d2c42c1c33a 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2139,6 +2139,26 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq) brq->data.error || brq->cmd.resp[0] & CMD_ERRORS; } +static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) +{ + struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); + bool gen_err = false; + int err; + + if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ) + return 0; + + err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req, &gen_err); + + /* Copy the general error bit so it will be seen later on */ + if (gen_err) { + mqrq->brq.stop.resp[0] |= R1_ERROR; + err = err ? err : -EIO; + } + + return err; +} + static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq, struct request *req) { @@ -2197,8 +2217,15 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, struct request *req) { struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); + struct mmc_host *host = mq->card->host; - mmc_blk_mq_rw_recovery(mq, req); + if (mmc_blk_rq_error(&mqrq->brq) || + mmc_blk_card_busy(mq->card, req)) { + mmc_blk_mq_rw_recovery(mq, req); + } else { + mmc_blk_rw_reset_success(mq, req); + mmc_retune_release(host); + } mmc_blk_urgent_bkops(mq, mqrq); } From c89b4851c67fb7354862850ae181de883269487d Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:09 +0200 Subject: [PATCH 18/96] mmc: block: Make card_busy_detect() accumulate all response error bits Make card_busy_detect() accumulate all response error bits. Later patches will make use of this. 
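As a rough illustration of the idea (a simplified sketch, not the driver code itself; it assumes the usual <linux/mmc/mmc.h> status definitions and the core-internal __mmc_send_status() helper, and omits the timeout handling): the polling loop ORs every status word it sees into an optional caller-supplied mask, so no response error bit observed during polling is lost, and callers decide afterwards which bits matter to them.

	static int poll_status_accumulate(struct mmc_card *card, u32 *resp_errs)
	{
		u32 status;
		int err;

		do {
			err = __mmc_send_status(card, &status, 5);
			if (err)
				return err;

			/* Keep every response error bit ever seen while polling */
			if (resp_errs)
				*resp_errs |= status;
		} while (!(status & R1_READY_FOR_DATA) ||
			 R1_CURRENT_STATE(status) == R1_STATE_PRG);

		return 0;
	}

A caller that still only cares about R1_ERROR can pass a zeroed u32 and test it once polling has finished, which is what the card_busy_detect_err() wrapper introduced below does.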
Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Signed-off-by: Ulf Hansson Tested-by: Linus Walleij --- drivers/mmc/core/block.c | 39 +++++++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 6d2c42c1c33a..30fc012353ae 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -923,7 +923,8 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks) } static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, - bool hw_busy_detect, struct request *req, bool *gen_err) + bool hw_busy_detect, struct request *req, + u32 *resp_errs) { unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); int err = 0; @@ -937,11 +938,9 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, return err; } - if (status & R1_ERROR) { - pr_err("%s: %s: error sending status cmd, status %#x\n", - req->rq_disk->disk_name, __func__, status); - *gen_err = true; - } + /* Accumulate any response error bits seen */ + if (resp_errs) + *resp_errs |= status; /* We may rely on the host hw to handle busy detection.*/ if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && @@ -970,6 +969,24 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, return err; } +static int card_busy_detect_err(struct mmc_card *card, unsigned int timeout_ms, + bool hw_busy_detect, struct request *req, + bool *gen_err) +{ + u32 resp_errs = 0; + int err; + + err = card_busy_detect(card, timeout_ms, hw_busy_detect, req, + &resp_errs); + if (resp_errs & R1_ERROR) { + pr_err("%s: %s: error sending status cmd, status %#x\n", + req->rq_disk->disk_name, __func__, resp_errs); + *gen_err = true; + } + + return err; +} + static int send_stop(struct mmc_card *card, unsigned int timeout_ms, struct request *req, bool *gen_err, u32 *stop_status) { @@ -1012,7 +1029,8 @@ static int send_stop(struct mmc_card *card, unsigned int timeout_ms, *gen_err = true; } - return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err); + return card_busy_detect_err(card, timeout_ms, use_r1b_resp, req, + gen_err); } #define ERR_NOMEDIUM 3 @@ -1553,8 +1571,8 @@ static enum mmc_blk_status __mmc_blk_err_check(struct mmc_card *card, gen_err = true; } - err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req, - &gen_err); + err = card_busy_detect_err(card, MMC_BLK_TIMEOUT_MS, false, req, + &gen_err); if (err) return MMC_BLK_CMD_ERR; } @@ -2148,7 +2166,8 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ) return 0; - err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req, &gen_err); + err = card_busy_detect_err(card, MMC_BLK_TIMEOUT_MS, false, req, + &gen_err); /* Copy the general error bit so it will be seen later on */ if (gen_err) { From f47a1fe346b1568df0e9b158574b2939432313df Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:10 +0200 Subject: [PATCH 19/96] mmc: block: blk-mq: Check error bits and save the exception bit when polling card busy Check error bits and save the exception bit when polling card busy. 
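In sketch form (the helper name here is invented; the real logic sits in mmc_blk_card_busy() in the hunk below), the status accumulated while polling is compared against the stop-command error mask that applies to this request, and the eMMC exception bit is propagated so the urgent-BKOPS check can see it later:

	static int eval_polled_status(struct mmc_card *card,
				      struct mmc_blk_request *brq,
				      u32 status, u32 stop_err_bits)
	{
		int err = 0;

		/* Do not assume data transferred correctly if error bits are set */
		if (status & stop_err_bits) {
			brq->data.bytes_xfered = 0;
			err = -EIO;
		}

		/* Copy the exception bit so it will be seen later on */
		if (mmc_card_mmc(card) && (status & R1_EXCEPTION_EVENT))
			brq->cmd.resp[0] |= R1_EXCEPTION_EVENT;

		return err;
	}

Here stop_err_bits is CMD_ERRORS when CMD23 was sent, so the out-of-range bit is meaningful, and CMD_ERRORS_EXCL_OOR otherwise, matching mmc_blk_stop_err_bits().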
Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Signed-off-by: Ulf Hansson Tested-by: Linus Walleij --- drivers/mmc/core/block.c | 37 ++++++++++++++++++++++++++++--------- 1 file changed, 28 insertions(+), 9 deletions(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 30fc012353ae..c446d17b48c4 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1457,15 +1457,18 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, } } -#define CMD_ERRORS \ - (R1_OUT_OF_RANGE | /* Command argument out of range */ \ - R1_ADDRESS_ERROR | /* Misaligned address */ \ +#define CMD_ERRORS_EXCL_OOR \ + (R1_ADDRESS_ERROR | /* Misaligned address */ \ R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\ R1_WP_VIOLATION | /* Tried to write to protected block */ \ R1_CARD_ECC_FAILED | /* Card ECC failed */ \ R1_CC_ERROR | /* Card controller error */ \ R1_ERROR) /* General/unknown error */ +#define CMD_ERRORS \ + (CMD_ERRORS_EXCL_OOR | \ + R1_OUT_OF_RANGE) /* Command argument out of range */ \ + static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq) { u32 val; @@ -2157,24 +2160,40 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq) brq->data.error || brq->cmd.resp[0] & CMD_ERRORS; } +static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq) +{ + return !!brq->mrq.sbc; +} + +static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq) +{ + return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR; +} + static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) { struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); - bool gen_err = false; + u32 status = 0; int err; if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ) return 0; - err = card_busy_detect_err(card, MMC_BLK_TIMEOUT_MS, false, req, - &gen_err); + err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req, &status); - /* Copy the general error bit so it will be seen later on */ - if (gen_err) { - mqrq->brq.stop.resp[0] |= R1_ERROR; + /* + * Do not assume data transferred correctly if there are any error bits + * set. + */ + if (status & mmc_blk_stop_err_bits(&mqrq->brq)) { + mqrq->brq.data.bytes_xfered = 0; err = err ? err : -EIO; } + /* Copy the exception bit so it will be seen later on */ + if (mmc_card_mmc(card) && status & R1_EXCEPTION_EVENT) + mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT; + return err; } From 7701885e56cee3de4447c0653f9059b62844983b Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:11 +0200 Subject: [PATCH 20/96] mmc: block: Check the timeout correctly in card_busy_detect() Pedantically, ensure the status is checked for the last time after the full timeout has passed. Signed-off-by: Adrian Hunter Signed-off-by: Ulf Hansson Acked-by: Linus Walleij Tested-by: Linus Walleij --- drivers/mmc/core/block.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index c446d17b48c4..f7c387c27ac0 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -931,6 +931,8 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, u32 status; do { + bool done = time_after(jiffies, timeout); + err = __mmc_send_status(card, &status, 5); if (err) { pr_err("%s: error %d requesting status\n", @@ -951,7 +953,7 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, * Timeout if the device never becomes ready for data and never * leaves the program state. 
*/ - if (time_after(jiffies, timeout)) { + if (done) { pr_err("%s: Card stuck in programming state! %s %s\n", mmc_hostname(card->host), req->rq_disk->disk_name, __func__); From 0987c6b046e199b9b922a585c62e9503486fe0bc Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:12 +0200 Subject: [PATCH 21/96] mmc: block: Check for transfer state in card_busy_detect() The card is required to return to transfer state. Since that is the state required to start another transfer, check for that state instead of programming state. Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Signed-off-by: Ulf Hansson Tested-by: Linus Walleij --- drivers/mmc/core/block.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index f7c387c27ac0..0b40fc2ebf77 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -922,6 +922,16 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks) return 0; } +static inline bool mmc_blk_in_tran_state(u32 status) +{ + /* + * Some cards mishandle the status bits, so make sure to check both the + * busy indication and the card state. + */ + return status & R1_READY_FOR_DATA && + (R1_CURRENT_STATE(status) == R1_STATE_TRAN); +} + static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, bool hw_busy_detect, struct request *req, u32 *resp_errs) @@ -954,9 +964,9 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, * leaves the program state. */ if (done) { - pr_err("%s: Card stuck in programming state! %s %s\n", + pr_err("%s: Card stuck in wrong state! %s %s status: %#x\n", mmc_hostname(card->host), - req->rq_disk->disk_name, __func__); + req->rq_disk->disk_name, __func__, status); return -ETIMEDOUT; } @@ -965,8 +975,7 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, * so make sure to check both the busy * indication and the card state. */ - } while (!(status & R1_READY_FOR_DATA) || - (R1_CURRENT_STATE(status) == R1_STATE_PRG)); + } while (!mmc_blk_in_tran_state(status)); return err; } From 92c0a0cc9483c6b9cc1b61273d30a0a601cb5e15 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:13 +0200 Subject: [PATCH 22/96] mmc: block: Add timeout_clks when calculating timeout According to the specification, total access time is derived from both TAAC and NSAC, which means the timeout should add both timeout_ns and timeout_clks. Host drivers do that, so make the block driver do that too. Signed-off-by: Adrian Hunter Signed-off-by: Ulf Hansson Acked-by: Linus Walleij Tested-by: Linus Walleij --- drivers/mmc/core/block.c | 42 +++++++++++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 7 deletions(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 0b40fc2ebf77..46e63aec1fcb 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -922,6 +922,34 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks) return 0; } +static unsigned int mmc_blk_clock_khz(struct mmc_host *host) +{ + if (host->actual_clock) + return host->actual_clock / 1000; + + /* Clock may be subject to a divisor, fudge it by a factor of 2. 
*/ + if (host->ios.clock) + return host->ios.clock / 2000; + + /* How can there be no clock */ + WARN_ON_ONCE(1); + return 100; /* 100 kHz is minimum possible value */ +} + +static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host, + struct mmc_data *data) +{ + unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000); + unsigned int khz; + + if (data->timeout_clks) { + khz = mmc_blk_clock_khz(host); + ms += DIV_ROUND_UP(data->timeout_clks, khz); + } + + return ms; +} + static inline bool mmc_blk_in_tran_state(u32 status) { /* @@ -1169,9 +1197,10 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, */ if (R1_CURRENT_STATE(status) == R1_STATE_DATA || R1_CURRENT_STATE(status) == R1_STATE_RCV) { - err = send_stop(card, - DIV_ROUND_UP(brq->data.timeout_ns, 1000000), - req, gen_err, &stop_status); + unsigned int timeout; + + timeout = mmc_blk_data_timeout_ms(card->host, &brq->data); + err = send_stop(card, timeout, req, gen_err, &stop_status); if (err) { pr_err("%s: error %d sending stop command\n", req->rq_disk->disk_name, err); @@ -1977,6 +2006,7 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) struct mmc_host *host = card->host; blk_status_t error = BLK_STS_OK; int retries = 0; + unsigned int timeout = mmc_blk_data_timeout_ms(host, mrq->data); do { u32 status; @@ -1995,10 +2025,8 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) u32 stop_status = 0; bool gen_err = false; - err = send_stop(card, - DIV_ROUND_UP(mrq->data->timeout_ns, - 1000000), - req, &gen_err, &stop_status); + err = send_stop(card, timeout, req, &gen_err, + &stop_status); if (err) goto error_exit; } From 6b7a363d2ce83e3940dc0c3628e478fe95f23985 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:14 +0200 Subject: [PATCH 23/96] mmc: block: Reduce polling timeout from 10 minutes to 10 seconds Set a 10 second timeout for polling write request busy state. Note, mmc core is setting a 3 second timeout for SD cards, and SDHCI has long had a 10 second software timer to timeout the whole request, so 10 seconds should be ample. Signed-off-by: Adrian Hunter Signed-off-by: Ulf Hansson Acked-by: Linus Walleij Tested-by: Linus Walleij --- drivers/mmc/core/block.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 46e63aec1fcb..9d323ed34f82 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -63,7 +63,13 @@ MODULE_ALIAS("mmc:block"); #endif #define MODULE_PARAM_PREFIX "mmcblk." -#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ +/* + * Set a 10 second timeout for polling write request busy state. Note, mmc core + * is setting a 3 second timeout for SD cards, and SDHCI has long had a 10 + * second software timer to timeout the whole request, so 10 seconds should be + * ample. + */ +#define MMC_BLK_TIMEOUT_MS (10 * 1000) #define MMC_SANITIZE_REQ_TIMEOUT 240000 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) From 7eb43d537166c7d767af450901acd0ecbf94625c Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:15 +0200 Subject: [PATCH 24/96] mmc: block: blk-mq: Stop using legacy recovery There are only a few things the recovery needs to do. 
Primarily, it just needs to: Determine the number of bytes transferred Get the card back to transfer state Determine whether to retry There are also a couple of additional features: Reset the card before the last retry Read one sector at a time The legacy code spent much effort analyzing command errors, but commands fail fast, so it is simpler just to give all command errors the same number of retries. Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Signed-off-by: Ulf Hansson Tested-by: Linus Walleij --- drivers/mmc/core/block.c | 304 +++++++++++++++++++++------------------ 1 file changed, 161 insertions(+), 143 deletions(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 9d323ed34f82..bd7ead343500 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1557,9 +1557,11 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq) } } -static enum mmc_blk_status __mmc_blk_err_check(struct mmc_card *card, - struct mmc_queue_req *mq_mrq) +static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, + struct mmc_async_req *areq) { + struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, + areq); struct mmc_blk_request *brq = &mq_mrq->brq; struct request *req = mmc_queue_req_to_req(mq_mrq); int need_retune = card->host->need_retune; @@ -1665,15 +1667,6 @@ static enum mmc_blk_status __mmc_blk_err_check(struct mmc_card *card, return MMC_BLK_SUCCESS; } -static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, - struct mmc_async_req *areq) -{ - struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, - areq); - - return __mmc_blk_err_check(card, mq_mrq); -} - static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, int disable_multi, bool *do_rel_wr_p, bool *do_data_tag_p) @@ -1999,8 +1992,39 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, } #define MMC_MAX_RETRIES 5 +#define MMC_DATA_RETRIES 2 #define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1) +static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout) +{ + struct mmc_command cmd = { + .opcode = MMC_STOP_TRANSMISSION, + .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC, + /* Some hosts wait for busy anyway, so provide a busy timeout */ + .busy_timeout = timeout, + }; + + return mmc_wait_for_cmd(card->host, &cmd, 5); +} + +static int mmc_blk_fix_state(struct mmc_card *card, struct request *req) +{ + struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); + struct mmc_blk_request *brq = &mqrq->brq; + unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data); + int err; + + mmc_retune_hold_now(card->host); + + mmc_blk_send_stop(card, timeout); + + err = card_busy_detect(card, timeout, false, req, NULL); + + mmc_retune_release(card->host); + + return err; +} + #define MMC_READ_SINGLE_RETRIES 2 /* Single sector read during recovery */ @@ -2012,7 +2036,6 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) struct mmc_host *host = card->host; blk_status_t error = BLK_STS_OK; int retries = 0; - unsigned int timeout = mmc_blk_data_timeout_ms(host, mrq->data); do { u32 status; @@ -2027,12 +2050,8 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) goto error_exit; if (!mmc_host_is_spi(host) && - R1_CURRENT_STATE(status) != R1_STATE_TRAN) { - u32 stop_status = 0; - bool gen_err = false; - - err = send_stop(card, timeout, req, &gen_err, - &stop_status); + !mmc_blk_in_tran_state(status)) { + err = mmc_blk_fix_state(card, req); if (err) goto error_exit; 
} @@ -2062,6 +2081,60 @@ error_exit: mqrq->retries = MMC_MAX_RETRIES - 1; } +static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq) +{ + return !!brq->mrq.sbc; +} + +static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq) +{ + return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR; +} + +/* + * Check for errors the host controller driver might not have seen such as + * response mode errors or invalid card state. + */ +static bool mmc_blk_status_error(struct request *req, u32 status) +{ + struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); + struct mmc_blk_request *brq = &mqrq->brq; + struct mmc_queue *mq = req->q->queuedata; + u32 stop_err_bits; + + if (mmc_host_is_spi(mq->card->host)) + return 0; + + stop_err_bits = mmc_blk_stop_err_bits(brq); + + return brq->cmd.resp[0] & CMD_ERRORS || + brq->stop.resp[0] & stop_err_bits || + status & stop_err_bits || + (rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status)); +} + +static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq) +{ + return !brq->sbc.error && !brq->cmd.error && + !(brq->cmd.resp[0] & CMD_ERRORS); +} + +/* + * Requests are completed by mmc_blk_mq_complete_rq() which sets simple + * policy: + * 1. A request that has transferred at least some data is considered + * successful and will be requeued if there is remaining data to + * transfer. + * 2. Otherwise the number of retries is incremented and the request + * will be requeued if there are remaining retries. + * 3. Otherwise the request will be errored out. + * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and + * mqrq->retries. So there are only 4 possible actions here: + * 1. do not accept the bytes_xfered value i.e. set it to zero + * 2. change mqrq->retries to determine the number of retries + * 3. try to reset the card + * 4. read one sector at a time + */ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req) { int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; @@ -2069,131 +2142,86 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req) struct mmc_blk_request *brq = &mqrq->brq; struct mmc_blk_data *md = mq->blkdata; struct mmc_card *card = mq->card; - static enum mmc_blk_status status; + u32 status; + u32 blocks; + int err; - brq->retune_retry_done = mqrq->retries; - - status = __mmc_blk_err_check(card, mqrq); + /* + * Some errors the host driver might not have seen. Set the number of + * bytes transferred to zero in that case. + */ + err = __mmc_send_status(card, &status, 0); + if (err || mmc_blk_status_error(req, status)) + brq->data.bytes_xfered = 0; mmc_retune_release(card->host); /* - * Requests are completed by mmc_blk_mq_complete_rq() which sets simple - * policy: - * 1. A request that has transferred at least some data is considered - * successful and will be requeued if there is remaining data to - * transfer. - * 2. Otherwise the number of retries is incremented and the request - * will be requeued if there are remaining retries. - * 3. Otherwise the request will be errored out. - * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and - * mqrq->retries. So there are only 4 possible actions here: - * 1. do not accept the bytes_xfered value i.e. set it to zero - * 2. change mqrq->retries to determine the number of retries - * 3. try to reset the card - * 4. read one sector at a time + * Try again to get the status. This also provides an opportunity for + * re-tuning. 
*/ - switch (status) { - case MMC_BLK_SUCCESS: - case MMC_BLK_PARTIAL: - /* Reset success, and accept bytes_xfered */ - mmc_blk_reset_success(md, type); - break; - case MMC_BLK_CMD_ERR: - /* - * For SD cards, get bytes written, but do not accept - * bytes_xfered if that fails. For MMC cards accept - * bytes_xfered. Then try to reset. If reset fails then - * error out the remaining request, otherwise retry - * once (N.B mmc_blk_reset() will not succeed twice in a - * row). - */ - if (mmc_card_sd(card)) { - u32 blocks; - int err; + if (err) + err = __mmc_send_status(card, &status, 0); - err = mmc_sd_num_wr_blocks(card, &blocks); - if (err) - brq->data.bytes_xfered = 0; - else - brq->data.bytes_xfered = blocks << 9; - } - if (mmc_blk_reset(md, card->host, type)) - mqrq->retries = MMC_NO_RETRIES; - else - mqrq->retries = MMC_MAX_RETRIES - 1; - break; - case MMC_BLK_RETRY: - /* - * Do not accept bytes_xfered, but retry up to 5 times, - * otherwise same as abort. - */ - brq->data.bytes_xfered = 0; - if (mqrq->retries < MMC_MAX_RETRIES) - break; - /* Fall through */ - case MMC_BLK_ABORT: - /* - * Do not accept bytes_xfered, but try to reset. If - * reset succeeds, try once more, otherwise error out - * the request. - */ - brq->data.bytes_xfered = 0; - if (mmc_blk_reset(md, card->host, type)) - mqrq->retries = MMC_NO_RETRIES; - else - mqrq->retries = MMC_MAX_RETRIES - 1; - break; - case MMC_BLK_DATA_ERR: { - int err; + /* + * Nothing more to do after the number of bytes transferred has been + * updated and there is no card. + */ + if (err && mmc_detect_card_removed(card->host)) + return; - /* - * Do not accept bytes_xfered, but try to reset. If - * reset succeeds, try once more. If reset fails with - * ENODEV which means the partition is wrong, then error - * out the request. Otherwise attempt to read one sector - * at a time. - */ - brq->data.bytes_xfered = 0; - err = mmc_blk_reset(md, card->host, type); - if (!err) { - mqrq->retries = MMC_MAX_RETRIES - 1; - break; - } - if (err == -ENODEV) { - mqrq->retries = MMC_NO_RETRIES; - break; - } - /* Fall through */ + /* Try to get back to "tran" state */ + if (!mmc_host_is_spi(mq->card->host) && + (err || !mmc_blk_in_tran_state(status))) + err = mmc_blk_fix_state(mq->card, req); + + /* + * Special case for SD cards where the card might record the number of + * blocks written. + */ + if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) && + rq_data_dir(req) == WRITE) { + if (mmc_sd_num_wr_blocks(card, &blocks)) + brq->data.bytes_xfered = 0; + else + brq->data.bytes_xfered = blocks << 9; } - case MMC_BLK_ECC_ERR: - /* - * Do not accept bytes_xfered. If reading more than one - * sector, try reading one sector at a time. - */ - brq->data.bytes_xfered = 0; - /* FIXME: Missing single sector read for large sector size */ - if (brq->data.blocks > 1 && !mmc_large_sector(card)) { - /* Redo read one sector at a time */ - pr_warn("%s: retrying using single block read\n", - req->rq_disk->disk_name); - mmc_blk_read_single(mq, req); - } else { - mqrq->retries = MMC_NO_RETRIES; - } - break; - case MMC_BLK_NOMEDIUM: - /* Do not accept bytes_xfered. Error out the request */ - brq->data.bytes_xfered = 0; + + /* Reset if the card is in a bad state */ + if (!mmc_host_is_spi(mq->card->host) && + err && mmc_blk_reset(md, card->host, type)) { + pr_err("%s: recovery failed!\n", req->rq_disk->disk_name); mqrq->retries = MMC_NO_RETRIES; - break; - default: - /* Do not accept bytes_xfered. 
Error out the request */ - brq->data.bytes_xfered = 0; - mqrq->retries = MMC_NO_RETRIES; - pr_err("%s: Unhandled return value (%d)", - req->rq_disk->disk_name, status); - break; + return; + } + + /* + * If anything was done, just return and if there is anything remaining + * on the request it will get requeued. + */ + if (brq->data.bytes_xfered) + return; + + /* Reset before last retry */ + if (mqrq->retries + 1 == MMC_MAX_RETRIES) + mmc_blk_reset(md, card->host, type); + + /* Command errors fail fast, so use all MMC_MAX_RETRIES */ + if (brq->sbc.error || brq->cmd.error) + return; + + /* Reduce the remaining retries for data errors */ + if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) { + mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES; + return; + } + + /* FIXME: Missing single sector read for large sector size */ + if (!mmc_large_sector(card) && rq_data_dir(req) == READ && + brq->data.blocks > 1) { + /* Read one sector at a time */ + mmc_blk_read_single(mq, req); + return; } } @@ -2205,16 +2233,6 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq) brq->data.error || brq->cmd.resp[0] & CMD_ERRORS; } -static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq) -{ - return !!brq->mrq.sbc; -} - -static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq) -{ - return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR; -} - static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) { struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); From 42f532da3a44843668dbacc1838a028b0a9b7373 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:16 +0200 Subject: [PATCH 25/96] mmc: mmc_test: Do not use mmc_start_areq() anymore The block driver's blk-mq paths do not use mmc_start_areq(). In order to remove mmc_start_areq() entirely, start by removing it from mmc_test. Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Signed-off-by: Ulf Hansson Tested-by: Linus Walleij --- drivers/mmc/core/mmc_test.c | 122 ++++++++++++++++-------------------- 1 file changed, 54 insertions(+), 68 deletions(-) diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c index 478869805b96..9311c8de2061 100644 --- a/drivers/mmc/core/mmc_test.c +++ b/drivers/mmc/core/mmc_test.c @@ -171,11 +171,6 @@ struct mmc_test_multiple_rw { enum mmc_test_prep_media prepare; }; -struct mmc_test_async_req { - struct mmc_async_req areq; - struct mmc_test_card *test; -}; - /*******************************************************************/ /* General helper functions */ /*******************************************************************/ @@ -741,30 +736,6 @@ static int mmc_test_check_result(struct mmc_test_card *test, return ret; } -static enum mmc_blk_status mmc_test_check_result_async(struct mmc_card *card, - struct mmc_async_req *areq) -{ - struct mmc_test_async_req *test_async = - container_of(areq, struct mmc_test_async_req, areq); - int ret; - - mmc_test_wait_busy(test_async->test); - - /* - * FIXME: this would earlier just casts a regular error code, - * either of the kernel type -ERRORCODE or the local test framework - * RESULT_* errorcode, into an enum mmc_blk_status and return as - * result check. Instead, convert it to some reasonable type by just - * returning either MMC_BLK_SUCCESS or MMC_BLK_CMD_ERR. - * If possible, a reasonable error code should be returned. 
- */ - ret = mmc_test_check_result(test_async->test, areq->mrq); - if (ret) - return MMC_BLK_CMD_ERR; - - return MMC_BLK_SUCCESS; -} - /* * Checks that a "short transfer" behaved as expected */ @@ -831,6 +802,45 @@ static struct mmc_test_req *mmc_test_req_alloc(void) return rq; } +static void mmc_test_wait_done(struct mmc_request *mrq) +{ + complete(&mrq->completion); +} + +static int mmc_test_start_areq(struct mmc_test_card *test, + struct mmc_request *mrq, + struct mmc_request *prev_mrq) +{ + struct mmc_host *host = test->card->host; + int err = 0; + + if (mrq) { + init_completion(&mrq->completion); + mrq->done = mmc_test_wait_done; + mmc_pre_req(host, mrq); + } + + if (prev_mrq) { + wait_for_completion(&prev_mrq->completion); + err = mmc_test_wait_busy(test); + if (!err) + err = mmc_test_check_result(test, prev_mrq); + } + + if (!err && mrq) { + err = mmc_start_request(host, mrq); + if (err) + mmc_retune_release(host); + } + + if (prev_mrq) + mmc_post_req(host, prev_mrq, 0); + + if (err && mrq) + mmc_post_req(host, mrq, err); + + return err; +} static int mmc_test_nonblock_transfer(struct mmc_test_card *test, struct scatterlist *sg, unsigned sg_len, @@ -838,17 +848,10 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test, unsigned blksz, int write, int count) { struct mmc_test_req *rq1, *rq2; - struct mmc_test_async_req test_areq[2]; - struct mmc_async_req *done_areq; - struct mmc_async_req *cur_areq = &test_areq[0].areq; - struct mmc_async_req *other_areq = &test_areq[1].areq; - enum mmc_blk_status status; + struct mmc_request *mrq, *prev_mrq; int i; int ret = RESULT_OK; - test_areq[0].test = test; - test_areq[1].test = test; - rq1 = mmc_test_req_alloc(); rq2 = mmc_test_req_alloc(); if (!rq1 || !rq2) { @@ -856,33 +859,25 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test, goto err; } - cur_areq->mrq = &rq1->mrq; - cur_areq->err_check = mmc_test_check_result_async; - other_areq->mrq = &rq2->mrq; - other_areq->err_check = mmc_test_check_result_async; + mrq = &rq1->mrq; + prev_mrq = NULL; for (i = 0; i < count; i++) { - mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr, - blocks, blksz, write); - done_areq = mmc_start_areq(test->card->host, cur_areq, &status); - - if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) { - ret = RESULT_FAIL; + mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq)); + mmc_test_prepare_mrq(test, mrq, sg, sg_len, dev_addr, blocks, + blksz, write); + ret = mmc_test_start_areq(test, mrq, prev_mrq); + if (ret) goto err; - } - if (done_areq) - mmc_test_req_reset(container_of(done_areq->mrq, - struct mmc_test_req, mrq)); + if (!prev_mrq) + prev_mrq = &rq2->mrq; - swap(cur_areq, other_areq); + swap(mrq, prev_mrq); dev_addr += blocks; } - done_areq = mmc_start_areq(test->card->host, NULL, &status); - if (status != MMC_BLK_SUCCESS) - ret = RESULT_FAIL; - + ret = mmc_test_start_areq(test, NULL, prev_mrq); err: kfree(rq1); kfree(rq2); @@ -2356,11 +2351,9 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test, struct mmc_test_req *rq = mmc_test_req_alloc(); struct mmc_host *host = test->card->host; struct mmc_test_area *t = &test->area; - struct mmc_test_async_req test_areq = { .test = test }; struct mmc_request *mrq; unsigned long timeout; bool expired = false; - enum mmc_blk_status blkstat = MMC_BLK_SUCCESS; int ret = 0, cmd_ret; u32 status = 0; int count = 0; @@ -2373,9 +2366,6 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test, mrq->sbc = &rq->sbc; mrq->cap_cmd_during_tfr = true; - 
test_areq.areq.mrq = mrq; - test_areq.areq.err_check = mmc_test_check_result_async; - mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks, 512, write); @@ -2388,11 +2378,9 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test, /* Start ongoing data request */ if (use_areq) { - mmc_start_areq(host, &test_areq.areq, &blkstat); - if (blkstat != MMC_BLK_SUCCESS) { - ret = RESULT_FAIL; + ret = mmc_test_start_areq(test, mrq, NULL); + if (ret) goto out_free; - } } else { mmc_wait_for_req(host, mrq); } @@ -2426,9 +2414,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test, /* Wait for data request to complete */ if (use_areq) { - mmc_start_areq(host, NULL, &blkstat); - if (blkstat != MMC_BLK_SUCCESS) - ret = RESULT_FAIL; + ret = mmc_test_start_areq(test, NULL, mrq); } else { mmc_wait_for_req_done(test->card->host, mrq); } From aa95014445769f3ac204f85ff85efe11bbd0bc8c Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Thu, 30 Nov 2017 05:54:24 +0800 Subject: [PATCH 26/96] mmc: block: blk-mq: fix boolreturn.cocci warnings drivers/mmc/core/block.c:2106:9-10: WARNING: return of 0/1 in function 'mmc_blk_status_error' with return type bool Return statements in functions returning bool should use true/false instead of 1/0. Generated by: scripts/coccinelle/misc/boolreturn.cocci Fixes:7eb43d537166 ("mmc: block: blk-mq: Stop using legacy recovery") CC: Adrian Hunter Signed-off-by: Fengguang Wu Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/core/block.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index bd7ead343500..ab384ba6cb37 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2103,7 +2103,7 @@ static bool mmc_blk_status_error(struct request *req, u32 status) u32 stop_err_bits; if (mmc_host_is_spi(mq->card->host)) - return 0; + return false; stop_err_bits = mmc_blk_stop_err_bits(brq); From 15ff2946b3c9661b14fc5123902dad28e1f13f3e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 30 Nov 2017 11:37:38 +0000 Subject: [PATCH 27/96] mmc: block: make function mmc_cqe_issue_type static The function mmc_cqe_issue_type is local to the source and does not need to be in global scope, so make it static. Cleans up sparse warning: drivers/mmc/core/queue.c:62:21: warning: symbol 'mmc_cqe_issue_type' was not declared. Should it be static? Signed-off-by: Colin Ian King Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/core/queue.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index d8394007bc99..5db388081789 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -59,8 +59,8 @@ static inline bool mmc_cqe_can_dcmd(struct mmc_host *host) return host->caps2 & MMC_CAP2_CQE_DCMD; } -enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host, - struct request *req) +static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host, + struct request *req) { switch (req_op(req)) { case REQ_OP_DRV_IN: From c14e60963ec1e0595250955271abfe4d5e96b3cb Mon Sep 17 00:00:00 2001 From: Ulrich Hecht Date: Wed, 29 Nov 2017 17:06:45 +0100 Subject: [PATCH 28/96] mmc: renesas_sdhi: enable R-Car D3 (r8a77995) support Whitelists for internal DMAC implementation. 
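For context, a simplified sketch of how such a whitelist is consulted (not part of the patch; the real table and probe logic live in renesas_sdhi_internal_dmac.c): the internal DMAC path is only taken when soc_device_match() finds the running SoC and revision in the table, so adding an r8a77995 entry is all that is needed to enable it on R-Car D3.

	#include <linux/sys_soc.h>

	static const struct soc_device_attribute example_whitelist[] = {
		{ .soc_id = "r8a77995", .revision = "ES1.0" },	/* R-Car D3 */
		{ /* sentinel */ }
	};

	static bool example_internal_dmac_allowed(void)
	{
		/* NULL means the running SoC/revision is not whitelisted */
		return soc_device_match(example_whitelist) != NULL;
	}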
Signed-off-by: Ulrich Hecht Reviewed-by: Geert Uytterhoeven Signed-off-by: Ulf Hansson Acked-by: Wolfram Sang --- drivers/mmc/host/renesas_sdhi_internal_dmac.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 41cbe84c1d18..396ae8a1c250 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -255,6 +255,7 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = { { .soc_id = "r8a7795", .revision = "ES1.*" }, { .soc_id = "r8a7795", .revision = "ES2.0" }, { .soc_id = "r8a7796", .revision = "ES1.0" }, + { .soc_id = "r8a77995", .revision = "ES1.0" }, { /* sentinel */ } }; From 043f2dca367c752bf8a570130f4f0ace4b4be4a8 Mon Sep 17 00:00:00 2001 From: Milan Stevanovic Date: Tue, 28 Nov 2017 01:02:57 +0100 Subject: [PATCH 29/96] mmc: sdhci-of-arasan: Add sdhci_arasan_set_power The power register needs to have a valid voltage set even when the power supply is managed by an external regulator. Signed-off-by: Milan Stevanovic Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-of-arasan.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index 0720ea717011..fb572066a88b 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -262,6 +262,17 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc, return -EINVAL; } +static void sdhci_arasan_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + if (!IS_ERR(host->mmc->supply.vmmc)) { + struct mmc_host *mmc = host->mmc; + + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); + } + sdhci_set_power_noreg(host, mode, vdd); +} + static const struct sdhci_ops sdhci_arasan_ops = { .set_clock = sdhci_arasan_set_clock, .get_max_clock = sdhci_pltfm_clk_get_max_clock, @@ -269,6 +280,7 @@ static const struct sdhci_ops sdhci_arasan_ops = { .set_bus_width = sdhci_set_bus_width, .reset = sdhci_arasan_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, + .set_power = sdhci_arasan_set_power, }; static const struct sdhci_pltfm_data sdhci_arasan_pdata = { From a5b97be2a7bbfc20d75f51f0969f102015edab6d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 27 Nov 2017 12:53:22 +0100 Subject: [PATCH 30/96] mmc_test: use ktime_get_ts64 for timestamps Calling getnstimeofday() can suffer from time jumps and from the y2038 overflow, so it is not appropriate here. Using ktime_get_ts64() solves both problems. Using ktime_get() with ktime_t timestamps would also work, but it seems that we mainly want to print the times as seconds+nanoseconds, so it would require an extra division in the output. Signed-off-by: Arnd Bergmann Signed-off-by: Ulf Hansson --- drivers/mmc/core/mmc_test.c | 88 ++++++++++++++++++------------------- 1 file changed, 42 insertions(+), 46 deletions(-) diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c index 9311c8de2061..f96bbb8014e1 100644 --- a/drivers/mmc/core/mmc_test.c +++ b/drivers/mmc/core/mmc_test.c @@ -101,7 +101,7 @@ struct mmc_test_transfer_result { struct list_head link; unsigned int count; unsigned int sectors; - struct timespec ts; + struct timespec64 ts; unsigned int rate; unsigned int iops; }; @@ -510,14 +510,11 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem, /* * Calculate transfer rate in bytes per second. 
*/ -static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts) +static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts) { uint64_t ns; - ns = ts->tv_sec; - ns *= 1000000000; - ns += ts->tv_nsec; - + ns = timespec64_to_ns(ts); bytes *= 1000000000; while (ns > UINT_MAX) { @@ -537,7 +534,7 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts) * Save transfer results for future usage */ static void mmc_test_save_transfer_result(struct mmc_test_card *test, - unsigned int count, unsigned int sectors, struct timespec ts, + unsigned int count, unsigned int sectors, struct timespec64 ts, unsigned int rate, unsigned int iops) { struct mmc_test_transfer_result *tr; @@ -562,21 +559,21 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test, * Print the transfer rate. */ static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes, - struct timespec *ts1, struct timespec *ts2) + struct timespec64 *ts1, struct timespec64 *ts2) { unsigned int rate, iops, sectors = bytes >> 9; - struct timespec ts; + struct timespec64 ts; - ts = timespec_sub(*ts2, *ts1); + ts = timespec64_sub(*ts2, *ts1); rate = mmc_test_rate(bytes, &ts); iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */ - pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu " + pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u " "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n", mmc_hostname(test->card->host), sectors, sectors >> 1, - (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec, - (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024, + (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec, + (u32)ts.tv_nsec, rate / 1000, rate / 1024, iops / 100, iops % 100); mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops); @@ -586,24 +583,24 @@ static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes, * Print the average transfer rate. */ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes, - unsigned int count, struct timespec *ts1, - struct timespec *ts2) + unsigned int count, struct timespec64 *ts1, + struct timespec64 *ts2) { unsigned int rate, iops, sectors = bytes >> 9; uint64_t tot = bytes * count; - struct timespec ts; + struct timespec64 ts; - ts = timespec_sub(*ts2, *ts1); + ts = timespec64_sub(*ts2, *ts1); rate = mmc_test_rate(tot, &ts); iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */ pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " - "%lu.%09lu seconds (%u kB/s, %u KiB/s, " + "%llu.%09u seconds (%u kB/s, %u KiB/s, " "%u.%02u IOPS, sg_len %d)\n", mmc_hostname(test->card->host), count, sectors, count, sectors >> 1, (sectors & 1 ? 
".5" : ""), - (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec, + (u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1000, rate / 1024, iops / 100, iops % 100, test->area.sg_len); @@ -1444,7 +1441,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz, int max_scatter, int timed, int count, bool nonblock, int min_sg_len) { - struct timespec ts1, ts2; + struct timespec64 ts1, ts2; int ret = 0; int i; struct mmc_test_area *t = &test->area; @@ -1470,7 +1467,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz, return ret; if (timed) - getnstimeofday(&ts1); + ktime_get_ts64(&ts1); if (nonblock) ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len, dev_addr, t->blocks, 512, write, count); @@ -1484,7 +1481,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz, return ret; if (timed) - getnstimeofday(&ts2); + ktime_get_ts64(&ts2); if (timed) mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2); @@ -1742,7 +1739,7 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test) struct mmc_test_area *t = &test->area; unsigned long sz; unsigned int dev_addr; - struct timespec ts1, ts2; + struct timespec64 ts1, ts2; int ret; if (!mmc_can_trim(test->card)) @@ -1753,19 +1750,19 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test) for (sz = 512; sz < t->max_sz; sz <<= 1) { dev_addr = t->dev_addr + (sz >> 9); - getnstimeofday(&ts1); + ktime_get_ts64(&ts1); ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); if (ret) return ret; - getnstimeofday(&ts2); + ktime_get_ts64(&ts2); mmc_test_print_rate(test, sz, &ts1, &ts2); } dev_addr = t->dev_addr; - getnstimeofday(&ts1); + ktime_get_ts64(&ts1); ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); if (ret) return ret; - getnstimeofday(&ts2); + ktime_get_ts64(&ts2); mmc_test_print_rate(test, sz, &ts1, &ts2); return 0; } @@ -1774,19 +1771,19 @@ static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz) { struct mmc_test_area *t = &test->area; unsigned int dev_addr, i, cnt; - struct timespec ts1, ts2; + struct timespec64 ts1, ts2; int ret; cnt = t->max_sz / sz; dev_addr = t->dev_addr; - getnstimeofday(&ts1); + ktime_get_ts64(&ts1); for (i = 0; i < cnt; i++) { ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0); if (ret) return ret; dev_addr += (sz >> 9); } - getnstimeofday(&ts2); + ktime_get_ts64(&ts2); mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); return 0; } @@ -1813,7 +1810,7 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz) { struct mmc_test_area *t = &test->area; unsigned int dev_addr, i, cnt; - struct timespec ts1, ts2; + struct timespec64 ts1, ts2; int ret; ret = mmc_test_area_erase(test); @@ -1821,14 +1818,14 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz) return ret; cnt = t->max_sz / sz; dev_addr = t->dev_addr; - getnstimeofday(&ts1); + ktime_get_ts64(&ts1); for (i = 0; i < cnt; i++) { ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0); if (ret) return ret; dev_addr += (sz >> 9); } - getnstimeofday(&ts2); + ktime_get_ts64(&ts2); mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); return 0; } @@ -1859,7 +1856,7 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test) struct mmc_test_area *t = &test->area; unsigned long sz; unsigned int dev_addr, i, cnt; - struct timespec ts1, ts2; + struct timespec64 ts1, ts2; int ret; if (!mmc_can_trim(test->card)) @@ -1877,7 +1874,7 @@ static int mmc_test_profile_seq_trim_perf(struct 
mmc_test_card *test) return ret; cnt = t->max_sz / sz; dev_addr = t->dev_addr; - getnstimeofday(&ts1); + ktime_get_ts64(&ts1); for (i = 0; i < cnt; i++) { ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); @@ -1885,7 +1882,7 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test) return ret; dev_addr += (sz >> 9); } - getnstimeofday(&ts2); + ktime_get_ts64(&ts2); mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); } return 0; @@ -1907,7 +1904,7 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print, { unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea; unsigned int ssz; - struct timespec ts1, ts2, ts; + struct timespec64 ts1, ts2, ts; int ret; ssz = sz >> 9; @@ -1916,10 +1913,10 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print, range1 = rnd_addr / test->card->pref_erase; range2 = range1 / ssz; - getnstimeofday(&ts1); + ktime_get_ts64(&ts1); for (cnt = 0; cnt < UINT_MAX; cnt++) { - getnstimeofday(&ts2); - ts = timespec_sub(ts2, ts1); + ktime_get_ts64(&ts2); + ts = timespec64_sub(ts2, ts1); if (ts.tv_sec >= 10) break; ea = mmc_test_rnd_num(range1); @@ -1993,7 +1990,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write, { struct mmc_test_area *t = &test->area; unsigned int dev_addr, i, cnt, sz, ssz; - struct timespec ts1, ts2; + struct timespec64 ts1, ts2; int ret; sz = t->max_tfr; @@ -2020,7 +2017,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write, cnt = tot_sz / sz; dev_addr &= 0xffff0000; /* Round to 64MiB boundary */ - getnstimeofday(&ts1); + ktime_get_ts64(&ts1); for (i = 0; i < cnt; i++) { ret = mmc_test_area_io(test, sz, dev_addr, write, max_scatter, 0); @@ -2028,7 +2025,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write, return ret; dev_addr += ssz; } - getnstimeofday(&ts2); + ktime_get_ts64(&ts2); mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); @@ -3052,10 +3049,9 @@ static int mtf_test_show(struct seq_file *sf, void *data) seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result); list_for_each_entry(tr, &gr->tr_lst, link) { - seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n", + seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n", tr->count, tr->sectors, - (unsigned long)tr->ts.tv_sec, - (unsigned long)tr->ts.tv_nsec, + (u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec, tr->rate, tr->iops / 100, tr->iops % 100); } } From 34597a3f60b1639ec8da440ec12afbfd057fb885 Mon Sep 17 00:00:00 2001 From: Shah Nehal-Bakulchandra Date: Fri, 1 Dec 2017 15:38:52 +0530 Subject: [PATCH 31/96] mmc: sdhci-acpi: Add support for ACPI HID of AMD Controller with HS400 This patch supports HS400 for AMD upcoming emmc 5.0 controller.The HS400 and HS200 mode requires hardware work around also. This patch adds the quirks for the same. Signed-off-by: Nehal-bakulchandra Shah Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-acpi.c | 79 +++++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index b988997a1e80..1b1ce804d2d7 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -446,6 +446,83 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = { .caps = MMC_CAP_NONREMOVABLE, }; +/* AMD sdhci reset dll register. 
*/ +#define SDHCI_AMD_RESET_DLL_REGISTER 0x908 + +static int amd_select_drive_strength(struct mmc_card *card, + unsigned int max_dtr, int host_drv, + int card_drv, int *drv_type) +{ + return MMC_SET_DRIVER_TYPE_A; +} + +static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host) +{ + /* AMD Platform requires dll setting */ + sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER); + usleep_range(10, 20); + sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER); +} + +/* + * For AMD Platform it is required to disable the tuning + * bit first controller to bring to HS Mode from HS200 + * mode, later enable to tune to HS400 mode. + */ +static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + unsigned int old_timing = host->timing; + + sdhci_set_ios(mmc, ios); + if (old_timing == MMC_TIMING_MMC_HS200 && + ios->timing == MMC_TIMING_MMC_HS) + sdhci_writew(host, 0x9, SDHCI_HOST_CONTROL2); + if (old_timing != MMC_TIMING_MMC_HS400 && + ios->timing == MMC_TIMING_MMC_HS400) { + sdhci_writew(host, 0x80, SDHCI_HOST_CONTROL2); + sdhci_acpi_amd_hs400_dll(host); + } +} + +static const struct sdhci_ops sdhci_acpi_ops_amd = { + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = { + .ops = &sdhci_acpi_ops_amd, +}; + +static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev, + const char *hid, const char *uid) +{ + struct sdhci_acpi_host *c = platform_get_drvdata(pdev); + struct sdhci_host *host = c->host; + + sdhci_read_caps(host); + if (host->caps1 & SDHCI_SUPPORT_DDR50) + host->mmc->caps = MMC_CAP_1_8V_DDR; + + if ((host->caps1 & SDHCI_SUPPORT_SDR104) && + (host->mmc->caps & MMC_CAP_1_8V_DDR)) + host->mmc->caps2 = MMC_CAP2_HS400_1_8V; + + host->mmc_host_ops.select_drive_strength = amd_select_drive_strength; + host->mmc_host_ops.set_ios = amd_set_ios; + return 0; +} + +static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = { + .chip = &sdhci_acpi_chip_amd, + .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, + .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE | + SDHCI_QUIRK_32BIT_ADMA_SIZE, + .probe_slot = sdhci_acpi_emmc_amd_probe_slot, +}; + struct sdhci_acpi_uid_slot { const char *hid; const char *uid; @@ -469,6 +546,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = { { "PNP0D40" }, { "QCOM8051", NULL, &sdhci_acpi_slot_qcom_sd_3v }, { "QCOM8052", NULL, &sdhci_acpi_slot_qcom_sd }, + { "AMDI0040", NULL, &sdhci_acpi_slot_amd_emmc }, { }, }; @@ -485,6 +563,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = { { "PNP0D40" }, { "QCOM8051" }, { "QCOM8052" }, + { "AMDI0040" }, { }, }; MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids); From d2383318c5a626312d166217e3788e54b1650c56 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Fri, 1 Dec 2017 14:55:30 +0200 Subject: [PATCH 32/96] mmc: core: Ensure cmd_completion is initialized mmc_test now uses mmc_start_request() to test sending commands during "ongoing" asynchronous transfers, i.e. tests: Commands during non-blocking read - use Set Block Count (CMD23) Commands during non-blocking write - use Set Block Count (CMD23) mmc_start_request() was not initializing cmd_completion, but cmd_completion is used by "ongoing" transfers, so move initialization of cmd_completion into making mmc_start_request(). 
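To illustrate the ordering requirement (a hypothetical waiter, assuming <linux/completion.h> and the existing mmc_command_done() callback): for cap_cmd_during_tfr requests the host driver completes mrq->cmd_completion, and the test code blocks on it, so the completion must be initialized by the time the request is started, regardless of which path submitted it.

	static void wait_cmd_phase(struct mmc_request *ongoing_mrq)
	{
		/*
		 * Completed by mmc_command_done() from the host driver; this is
		 * only safe if init_completion(&mrq->cmd_completion) ran when
		 * the request was started, which moving the initialization into
		 * mmc_start_request() guarantees for every submission path.
		 */
		wait_for_completion(&ongoing_mrq->cmd_completion);
	}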
Fixes: cb39f61e9b1e ("mmc: core: Export a few functions needed for blkmq support") Signed-off-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/core/core.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 617802f45386..455abbf4f41e 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -348,6 +348,8 @@ int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) { int err; + init_completion(&mrq->cmd_completion); + mmc_retune_hold(host); if (mmc_card_removed(host->card)) @@ -418,8 +420,6 @@ static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq) mrq->done = mmc_wait_data_done; mrq->host = host; - init_completion(&mrq->cmd_completion); - err = mmc_start_request(host, mrq); if (err) { mrq->cmd->error = err; @@ -439,8 +439,6 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) init_completion(&mrq->completion); mrq->done = mmc_wait_done; - init_completion(&mrq->cmd_completion); - err = mmc_start_request(host, mrq); if (err) { mrq->cmd->error = err; From 23a185254ace8e63dc4ca36e0315aed9440ae749 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Fri, 1 Dec 2017 14:55:31 +0200 Subject: [PATCH 33/96] mmc: mmc_test: Ensure command queue is disabled for testing mmc_test disables the command queue because none of the tests use the command queue. However the Reset Test will re-enable it, so disable it in that case too. Fixes: 9d4579a85c84 ("mmc: mmc_test: Disable Command Queue while mmc_test is used") Signed-off-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/core/mmc_test.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c index f96bbb8014e1..ef18daeaa4cc 100644 --- a/drivers/mmc/core/mmc_test.c +++ b/drivers/mmc/core/mmc_test.c @@ -2320,10 +2320,17 @@ static int mmc_test_reset(struct mmc_test_card *test) int err; err = mmc_hw_reset(host); - if (!err) + if (!err) { + /* + * Reset will re-enable the card's command queue, but tests + * expect it to be disabled. + */ + if (card->ext_csd.cmdq_en) + mmc_cmdq_disable(card); return RESULT_OK; - else if (err == -EOPNOTSUPP) + } else if (err == -EOPNOTSUPP) { return RESULT_UNSUP_HOST; + } return RESULT_FAIL; } From f2bc600008bd6f7f5d0b6b56238d14f95cd454d2 Mon Sep 17 00:00:00 2001 From: "yinbo.zhu" Date: Fri, 1 Dec 2017 15:09:34 +0800 Subject: [PATCH 34/96] mmc: sdhci-of-esdhc: fix the mmc error after sleep on ls1046ardb When system wakes up from sleep on ls1046ardb, the SD operation fails with mmc error messages since ESDHC_TB_EN bit couldn't be cleaned by eSDHC_SYSCTL[RSTA]. It's proper to clean this bit in esdhc_reset() rather than in probe. 
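Pulled out of the hunk below for readability (the helper name is invented; the patch open-codes this inside esdhc_reset()), the fix amounts to a read-modify-write of ESDHC_TBCTL on every full reset:

	static void esdhc_clear_tuning_block(struct sdhci_host *host, u8 mask)
	{
		u32 val;

		/* Only a full reset needs this, and RSTA itself cannot clear the bit */
		if (!(mask & SDHCI_RESET_ALL))
			return;

		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_TB_EN;
		sdhci_writel(host, val, ESDHC_TBCTL);
	}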
Signed-off-by: yinbo.zhu Acked-by: Yangbo Lu Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-of-esdhc.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index d74030f3bd12..4ffa6b173a21 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -589,10 +589,18 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) static void esdhc_reset(struct sdhci_host *host, u8 mask) { + u32 val; + sdhci_reset(host, mask); sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + + if (mask & SDHCI_RESET_ALL) { + val = sdhci_readl(host, ESDHC_TBCTL); + val &= ~ESDHC_TB_EN; + sdhci_writel(host, val, ESDHC_TBCTL); + } } /* The SCFG, Supplemental Configuration Unit, provides SoC specific @@ -785,10 +793,6 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) pltfm_host = sdhci_priv(host); esdhc = sdhci_pltfm_priv(pltfm_host); - val = sdhci_readl(host, ESDHC_TBCTL); - val &= ~ESDHC_TB_EN; - sdhci_writel(host, val, ESDHC_TBCTL); - host_ver = sdhci_readw(host, SDHCI_HOST_VERSION); esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; From 1bec43a3b181baebdf8a4cd739b480a9132601d7 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:17 +0200 Subject: [PATCH 35/96] mmc: core: Remove option not to use blk-mq Remove config option MMC_MQ_DEFAULT and parameter mmc_use_blk_mq, so that blk-mq must be used always. Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Tested-by: Linus Walleij Signed-off-by: Ulf Hansson --- drivers/mmc/Kconfig | 10 ---------- drivers/mmc/core/core.c | 7 ------- drivers/mmc/core/core.h | 2 -- drivers/mmc/core/host.c | 2 -- drivers/mmc/core/host.h | 2 +- 5 files changed, 1 insertion(+), 22 deletions(-) diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig index 42565562577c..ec21388311db 100644 --- a/drivers/mmc/Kconfig +++ b/drivers/mmc/Kconfig @@ -12,16 +12,6 @@ menuconfig MMC If you want MMC/SD/SDIO support, you should say Y here and also to your specific host controller driver. -config MMC_MQ_DEFAULT - bool "MMC: use blk-mq I/O path by default" - depends on MMC && BLOCK - default y - ---help--- - This option enables the new blk-mq based I/O path for MMC block - devices by default. With the option the mmc_core.use_blk_mq - module/boot option defaults to Y, without it to N, but it can - still be overridden either way. 
- if MMC source "drivers/mmc/core/Kconfig" diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 455abbf4f41e..2a137976107f 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -66,13 +66,6 @@ static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; bool use_spi_crc = 1; module_param(use_spi_crc, bool, 0); -#ifdef CONFIG_MMC_MQ_DEFAULT -bool mmc_use_blk_mq = true; -#else -bool mmc_use_blk_mq = false; -#endif -module_param_named(use_blk_mq, mmc_use_blk_mq, bool, S_IWUSR | S_IRUGO); - static int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay) { diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 136617d2f971..3e3d21304e5f 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -35,8 +35,6 @@ struct mmc_bus_ops { int (*reset)(struct mmc_host *); }; -extern bool mmc_use_blk_mq; - void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); void mmc_detach_bus(struct mmc_host *host); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 409a68a96a0a..64b03d6eaf18 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -404,8 +404,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) host->fixed_drv_type = -EINVAL; - host->use_blk_mq = mmc_use_blk_mq; - return host; } diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h index 8ca284e079e3..6d896869e5c6 100644 --- a/drivers/mmc/core/host.h +++ b/drivers/mmc/core/host.h @@ -81,7 +81,7 @@ static inline bool mmc_card_hs400es(struct mmc_card *card) static inline bool mmc_host_use_blk_mq(struct mmc_host *host) { - return host->use_blk_mq; + return true; } #endif From 0fbfd12518303e9b32ac9fd231439459eac848f9 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:18 +0200 Subject: [PATCH 36/96] mmc: block: Remove code no longer needed after the switch to blk-mq Remove code no longer needed after the switch to blk-mq. Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Tested-by: Linus Walleij Signed-off-by: Ulf Hansson --- drivers/mmc/core/block.c | 723 +-------------------------------------- drivers/mmc/core/block.h | 2 - drivers/mmc/core/queue.c | 240 +------------ drivers/mmc/core/queue.h | 15 - 4 files changed, 16 insertions(+), 964 deletions(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index ab384ba6cb37..579fc0bd722f 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -967,8 +967,7 @@ static inline bool mmc_blk_in_tran_state(u32 status) } static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, - bool hw_busy_detect, struct request *req, - u32 *resp_errs) + struct request *req, u32 *resp_errs) { unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); int err = 0; @@ -988,11 +987,6 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, if (resp_errs) *resp_errs |= status; - /* We may rely on the host hw to handle busy detection.*/ - if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && - hw_busy_detect) - break; - /* * Timeout if the device never becomes ready for data and never * leaves the program state. 
@@ -1014,243 +1008,6 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, return err; } -static int card_busy_detect_err(struct mmc_card *card, unsigned int timeout_ms, - bool hw_busy_detect, struct request *req, - bool *gen_err) -{ - u32 resp_errs = 0; - int err; - - err = card_busy_detect(card, timeout_ms, hw_busy_detect, req, - &resp_errs); - if (resp_errs & R1_ERROR) { - pr_err("%s: %s: error sending status cmd, status %#x\n", - req->rq_disk->disk_name, __func__, resp_errs); - *gen_err = true; - } - - return err; -} - -static int send_stop(struct mmc_card *card, unsigned int timeout_ms, - struct request *req, bool *gen_err, u32 *stop_status) -{ - struct mmc_host *host = card->host; - struct mmc_command cmd = {}; - int err; - bool use_r1b_resp = rq_data_dir(req) == WRITE; - - /* - * Normally we use R1B responses for WRITE, but in cases where the host - * has specified a max_busy_timeout we need to validate it. A failure - * means we need to prevent the host from doing hw busy detection, which - * is done by converting to a R1 response instead. - */ - if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) - use_r1b_resp = false; - - cmd.opcode = MMC_STOP_TRANSMISSION; - if (use_r1b_resp) { - cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; - cmd.busy_timeout = timeout_ms; - } else { - cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; - } - - err = mmc_wait_for_cmd(host, &cmd, 5); - if (err) - return err; - - *stop_status = cmd.resp[0]; - - /* No need to check card status in case of READ. */ - if (rq_data_dir(req) == READ) - return 0; - - if (!mmc_host_is_spi(host) && - (*stop_status & R1_ERROR)) { - pr_err("%s: %s: general error sending stop command, resp %#x\n", - req->rq_disk->disk_name, __func__, *stop_status); - *gen_err = true; - } - - return card_busy_detect_err(card, timeout_ms, use_r1b_resp, req, - gen_err); -} - -#define ERR_NOMEDIUM 3 -#define ERR_RETRY 2 -#define ERR_ABORT 1 -#define ERR_CONTINUE 0 - -static int mmc_blk_cmd_error(struct request *req, const char *name, int error, - bool status_valid, u32 status) -{ - switch (error) { - case -EILSEQ: - /* response crc error, retry the r/w cmd */ - pr_err("%s: %s sending %s command, card status %#x\n", - req->rq_disk->disk_name, "response CRC error", - name, status); - return ERR_RETRY; - - case -ETIMEDOUT: - pr_err("%s: %s sending %s command, card status %#x\n", - req->rq_disk->disk_name, "timed out", name, status); - - /* If the status cmd initially failed, retry the r/w cmd */ - if (!status_valid) { - pr_err("%s: status not valid, retrying timeout\n", - req->rq_disk->disk_name); - return ERR_RETRY; - } - - /* - * If it was a r/w cmd crc error, or illegal command - * (eg, issued in wrong state) then retry - we should - * have corrected the state problem above. - */ - if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) { - pr_err("%s: command error, retrying timeout\n", - req->rq_disk->disk_name); - return ERR_RETRY; - } - - /* Otherwise abort the command */ - return ERR_ABORT; - - default: - /* We don't understand the error code the driver gave us */ - pr_err("%s: unknown error %d sending read/write command, card status %#x\n", - req->rq_disk->disk_name, error, status); - return ERR_ABORT; - } -} - -/* - * Initial r/w and stop cmd error recovery. - * We don't know whether the card received the r/w cmd or not, so try to - * restore things back to a sane state. Essentially, we do this as follows: - * - Obtain card status. 
If the first attempt to obtain card status fails, - * the status word will reflect the failed status cmd, not the failed - * r/w cmd. If we fail to obtain card status, it suggests we can no - * longer communicate with the card. - * - Check the card state. If the card received the cmd but there was a - * transient problem with the response, it might still be in a data transfer - * mode. Try to send it a stop command. If this fails, we can't recover. - * - If the r/w cmd failed due to a response CRC error, it was probably - * transient, so retry the cmd. - * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry. - * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or - * illegal cmd, retry. - * Otherwise we don't understand what happened, so abort. - */ -static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, - struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err) -{ - bool prev_cmd_status_valid = true; - u32 status, stop_status = 0; - int err, retry; - - if (mmc_card_removed(card)) - return ERR_NOMEDIUM; - - /* - * Try to get card status which indicates both the card state - * and why there was no response. If the first attempt fails, - * we can't be sure the returned status is for the r/w command. - */ - for (retry = 2; retry >= 0; retry--) { - err = __mmc_send_status(card, &status, 0); - if (!err) - break; - - /* Re-tune if needed */ - mmc_retune_recheck(card->host); - - prev_cmd_status_valid = false; - pr_err("%s: error %d sending status command, %sing\n", - req->rq_disk->disk_name, err, retry ? "retry" : "abort"); - } - - /* We couldn't get a response from the card. Give up. */ - if (err) { - /* Check if the card is removed */ - if (mmc_detect_card_removed(card->host)) - return ERR_NOMEDIUM; - return ERR_ABORT; - } - - /* Flag ECC errors */ - if ((status & R1_CARD_ECC_FAILED) || - (brq->stop.resp[0] & R1_CARD_ECC_FAILED) || - (brq->cmd.resp[0] & R1_CARD_ECC_FAILED)) - *ecc_err = true; - - /* Flag General errors */ - if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) - if ((status & R1_ERROR) || - (brq->stop.resp[0] & R1_ERROR)) { - pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n", - req->rq_disk->disk_name, __func__, - brq->stop.resp[0], status); - *gen_err = true; - } - - /* - * Check the current card state. If it is in some data transfer - * mode, tell it to stop (and hopefully transition back to TRAN.) - */ - if (R1_CURRENT_STATE(status) == R1_STATE_DATA || - R1_CURRENT_STATE(status) == R1_STATE_RCV) { - unsigned int timeout; - - timeout = mmc_blk_data_timeout_ms(card->host, &brq->data); - err = send_stop(card, timeout, req, gen_err, &stop_status); - if (err) { - pr_err("%s: error %d sending stop command\n", - req->rq_disk->disk_name, err); - /* - * If the stop cmd also timed out, the card is probably - * not present, so abort. Other errors are bad news too. - */ - return ERR_ABORT; - } - - if (stop_status & R1_CARD_ECC_FAILED) - *ecc_err = true; - } - - /* Check for set block count errors */ - if (brq->sbc.error) - return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error, - prev_cmd_status_valid, status); - - /* Check for r/w command errors */ - if (brq->cmd.error) - return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error, - prev_cmd_status_valid, status); - - /* Data errors */ - if (!brq->stop.error) - return ERR_CONTINUE; - - /* Now for stop errors. These aren't fatal to the transfer. 
*/ - pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n", - req->rq_disk->disk_name, brq->stop.error, - brq->cmd.resp[0], status); - - /* - * Subsitute in our own stop status as this will give the error - * state which happened during the execution of the r/w command. - */ - if (stop_status) { - brq->stop.resp[0] = stop_status; - brq->stop.error = 0; - } - return ERR_CONTINUE; -} - static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, int type) { @@ -1285,14 +1042,6 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) md->reset_done &= ~type; } -static void mmc_blk_end_request(struct request *req, blk_status_t error) -{ - if (req->mq_ctx) - blk_mq_end_request(req, error); - else - blk_end_request_all(req, error); -} - /* * The non-block commands come back from the block layer after it queued it and * processed it with all other requests and then they get issued in this @@ -1354,7 +1103,7 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) break; } mq_rq->drv_op_result = ret; - mmc_blk_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK); + blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK); } static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) @@ -1397,7 +1146,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) else mmc_blk_reset_success(md, type); fail: - mmc_blk_end_request(req, status); + blk_mq_end_request(req, status); } static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, @@ -1467,7 +1216,7 @@ out_retry: if (!err) mmc_blk_reset_success(md, type); out: - mmc_blk_end_request(req, status); + blk_mq_end_request(req, status); } static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) @@ -1477,7 +1226,7 @@ static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) int ret = 0; ret = mmc_flush_cache(card); - mmc_blk_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK); + blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK); } /* @@ -1557,116 +1306,6 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq) } } -static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, - struct mmc_async_req *areq) -{ - struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, - areq); - struct mmc_blk_request *brq = &mq_mrq->brq; - struct request *req = mmc_queue_req_to_req(mq_mrq); - int need_retune = card->host->need_retune; - bool ecc_err = false; - bool gen_err = false; - - /* - * sbc.error indicates a problem with the set block count - * command. No data will have been transferred. - * - * cmd.error indicates a problem with the r/w command. No - * data will have been transferred. - * - * stop.error indicates a problem with the stop command. Data - * may have been transferred, or may still be transferring. - */ - - mmc_blk_eval_resp_error(brq); - - if (brq->sbc.error || brq->cmd.error || - brq->stop.error || brq->data.error) { - switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) { - case ERR_RETRY: - return MMC_BLK_RETRY; - case ERR_ABORT: - return MMC_BLK_ABORT; - case ERR_NOMEDIUM: - return MMC_BLK_NOMEDIUM; - case ERR_CONTINUE: - break; - } - } - - /* - * Check for errors relating to the execution of the - * initial command - such as address errors. No data - * has been transferred. 
- */ - if (brq->cmd.resp[0] & CMD_ERRORS) { - pr_err("%s: r/w command failed, status = %#x\n", - req->rq_disk->disk_name, brq->cmd.resp[0]); - return MMC_BLK_ABORT; - } - - /* - * Everything else is either success, or a data error of some - * kind. If it was a write, we may have transitioned to - * program mode, which we have to wait for it to complete. - */ - if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { - int err; - - /* Check stop command response */ - if (brq->stop.resp[0] & R1_ERROR) { - pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n", - req->rq_disk->disk_name, __func__, - brq->stop.resp[0]); - gen_err = true; - } - - err = card_busy_detect_err(card, MMC_BLK_TIMEOUT_MS, false, req, - &gen_err); - if (err) - return MMC_BLK_CMD_ERR; - } - - /* if general error occurs, retry the write operation. */ - if (gen_err) { - pr_warn("%s: retrying write for general error\n", - req->rq_disk->disk_name); - return MMC_BLK_RETRY; - } - - /* Some errors (ECC) are flagged on the next commmand, so check stop, too */ - if (brq->data.error || brq->stop.error) { - if (need_retune && !brq->retune_retry_done) { - pr_debug("%s: retrying because a re-tune was needed\n", - req->rq_disk->disk_name); - brq->retune_retry_done = 1; - return MMC_BLK_RETRY; - } - pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n", - req->rq_disk->disk_name, brq->data.error ?: brq->stop.error, - (unsigned)blk_rq_pos(req), - (unsigned)blk_rq_sectors(req), - brq->cmd.resp[0], brq->stop.resp[0]); - - if (rq_data_dir(req) == READ) { - if (ecc_err) - return MMC_BLK_ECC_ERR; - return MMC_BLK_DATA_ERR; - } else { - return MMC_BLK_CMD_ERR; - } - } - - if (!brq->data.bytes_xfered) - return MMC_BLK_RETRY; - - if (blk_rq_bytes(req) != brq->data.bytes_xfered) - return MMC_BLK_PARTIAL; - - return MMC_BLK_SUCCESS; -} - static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, int disable_multi, bool *do_rel_wr_p, bool *do_data_tag_p) @@ -1782,8 +1421,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, brq->data.sg_len = i; } - mqrq->areq.mrq = &brq->mrq; - if (do_rel_wr_p) *do_rel_wr_p = do_rel_wr; @@ -1987,8 +1624,6 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; brq->mrq.sbc = &brq->sbc; } - - mqrq->areq.err_check = mmc_blk_err_check; } #define MMC_MAX_RETRIES 5 @@ -2018,7 +1653,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req) mmc_blk_send_stop(card, timeout); - err = card_busy_detect(card, timeout, false, req, NULL); + err = card_busy_detect(card, timeout, req, NULL); mmc_retune_release(card->host); @@ -2242,7 +1877,7 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ) return 0; - err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req, &status); + err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, req, &status); /* * Do not assume data transferred correctly if there are any error bits @@ -2622,350 +2257,6 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req) } } -static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, - struct mmc_blk_request *brq, struct request *req, - bool old_req_pending) -{ - bool req_pending; - - /* - * If this is an SD card and we're writing, we can first - * mark the known good sectors as ok. 
- * - * If the card is not SD, we can still ok written sectors - * as reported by the controller (which might be less than - * the real number of written sectors, but never more). - */ - if (mmc_card_sd(card)) { - u32 blocks; - int err; - - err = mmc_sd_num_wr_blocks(card, &blocks); - if (err) - req_pending = old_req_pending; - else - req_pending = blk_end_request(req, BLK_STS_OK, blocks << 9); - } else { - req_pending = blk_end_request(req, BLK_STS_OK, brq->data.bytes_xfered); - } - return req_pending; -} - -static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card, - struct request *req, - struct mmc_queue_req *mqrq) -{ - if (mmc_card_removed(card)) - req->rq_flags |= RQF_QUIET; - while (blk_end_request(req, BLK_STS_IOERR, blk_rq_cur_bytes(req))); - mq->qcnt--; -} - -/** - * mmc_blk_rw_try_restart() - tries to restart the current async request - * @mq: the queue with the card and host to restart - * @req: a new request that want to be started after the current one - */ -static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req, - struct mmc_queue_req *mqrq) -{ - if (!req) - return; - - /* - * If the card was removed, just cancel everything and return. - */ - if (mmc_card_removed(mq->card)) { - req->rq_flags |= RQF_QUIET; - blk_end_request_all(req, BLK_STS_IOERR); - mq->qcnt--; /* FIXME: just set to 0? */ - return; - } - /* Else proceed and try to restart the current async request */ - mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq); - mmc_start_areq(mq->card->host, &mqrq->areq, NULL); -} - -static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) -{ - struct mmc_blk_data *md = mq->blkdata; - struct mmc_card *card = md->queue.card; - struct mmc_blk_request *brq; - int disable_multi = 0, retry = 0, type, retune_retry_done = 0; - enum mmc_blk_status status; - struct mmc_queue_req *mqrq_cur = NULL; - struct mmc_queue_req *mq_rq; - struct request *old_req; - struct mmc_async_req *new_areq; - struct mmc_async_req *old_areq; - bool req_pending = true; - - if (new_req) { - mqrq_cur = req_to_mmc_queue_req(new_req); - mq->qcnt++; - } - - if (!mq->qcnt) - return; - - do { - if (new_req) { - /* - * When 4KB native sector is enabled, only 8 blocks - * multiple read or write is allowed - */ - if (mmc_large_sector(card) && - !IS_ALIGNED(blk_rq_sectors(new_req), 8)) { - pr_err("%s: Transfer size is not 4KB sector size aligned\n", - new_req->rq_disk->disk_name); - mmc_blk_rw_cmd_abort(mq, card, new_req, mqrq_cur); - return; - } - - mmc_blk_rw_rq_prep(mqrq_cur, card, 0, mq); - new_areq = &mqrq_cur->areq; - } else - new_areq = NULL; - - old_areq = mmc_start_areq(card->host, new_areq, &status); - if (!old_areq) { - /* - * We have just put the first request into the pipeline - * and there is nothing more to do until it is - * complete. - */ - return; - } - - /* - * An asynchronous request has been completed and we proceed - * to handle the result of it. - */ - mq_rq = container_of(old_areq, struct mmc_queue_req, areq); - brq = &mq_rq->brq; - old_req = mmc_queue_req_to_req(mq_rq); - type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; - - switch (status) { - case MMC_BLK_SUCCESS: - case MMC_BLK_PARTIAL: - /* - * Reset success, and accept bytes_xfered. For - * MMC_BLK_PARTIAL re-submit the remaining request. For - * MMC_BLK_SUCCESS error out the remaining request (it - * could not be re-submitted anyway if a next request - * had already begun). 
- */ - mmc_blk_reset_success(md, type); - - req_pending = blk_end_request(old_req, BLK_STS_OK, - brq->data.bytes_xfered); - /* - * If the blk_end_request function returns non-zero even - * though all data has been transferred and no errors - * were returned by the host controller, it's a bug. - */ - if (status == MMC_BLK_SUCCESS && req_pending) { - pr_err("%s BUG rq_tot %d d_xfer %d\n", - __func__, blk_rq_bytes(old_req), - brq->data.bytes_xfered); - mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq); - return; - } - break; - case MMC_BLK_CMD_ERR: - /* - * For SD cards, get bytes written, but do not accept - * bytes_xfered if that fails. For MMC cards accept - * bytes_xfered. Then try to reset. If reset fails then - * error out the remaining request, otherwise retry - * once (N.B mmc_blk_reset() will not succeed twice in a - * row). - */ - req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending); - if (mmc_blk_reset(md, card->host, type)) { - if (req_pending) - mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq); - else - mq->qcnt--; - mmc_blk_rw_try_restart(mq, new_req, mqrq_cur); - return; - } - if (!req_pending) { - mq->qcnt--; - mmc_blk_rw_try_restart(mq, new_req, mqrq_cur); - return; - } - break; - case MMC_BLK_RETRY: - /* - * Do not accept bytes_xfered, but retry up to 5 times, - * otherwise same as abort. - */ - retune_retry_done = brq->retune_retry_done; - if (retry++ < 5) - break; - /* Fall through */ - case MMC_BLK_ABORT: - /* - * Do not accept bytes_xfered, but try to reset. If - * reset succeeds, try once more, otherwise error out - * the request. - */ - if (!mmc_blk_reset(md, card->host, type)) - break; - mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq); - mmc_blk_rw_try_restart(mq, new_req, mqrq_cur); - return; - case MMC_BLK_DATA_ERR: { - int err; - - /* - * Do not accept bytes_xfered, but try to reset. If - * reset succeeds, try once more. If reset fails with - * ENODEV which means the partition is wrong, then error - * out the request. Otherwise attempt to read one sector - * at a time. - */ - err = mmc_blk_reset(md, card->host, type); - if (!err) - break; - if (err == -ENODEV) { - mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq); - mmc_blk_rw_try_restart(mq, new_req, mqrq_cur); - return; - } - /* Fall through */ - } - case MMC_BLK_ECC_ERR: - /* - * Do not accept bytes_xfered. If reading more than one - * sector, try reading one sector at a time. - */ - if (brq->data.blocks > 1) { - /* Redo read one sector at a time */ - pr_warn("%s: retrying using single block read\n", - old_req->rq_disk->disk_name); - disable_multi = 1; - break; - } - /* - * After an error, we redo I/O one sector at a - * time, so we only reach here after trying to - * read a single sector. - */ - req_pending = blk_end_request(old_req, BLK_STS_IOERR, - brq->data.blksz); - if (!req_pending) { - mq->qcnt--; - mmc_blk_rw_try_restart(mq, new_req, mqrq_cur); - return; - } - break; - case MMC_BLK_NOMEDIUM: - /* Do not accept bytes_xfered. Error out the request */ - mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq); - mmc_blk_rw_try_restart(mq, new_req, mqrq_cur); - return; - default: - /* Do not accept bytes_xfered. Error out the request */ - pr_err("%s: Unhandled return value (%d)", - old_req->rq_disk->disk_name, status); - mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq); - mmc_blk_rw_try_restart(mq, new_req, mqrq_cur); - return; - } - - if (req_pending) { - /* - * In case of a incomplete request - * prepare it again and resend. 
- */ - mmc_blk_rw_rq_prep(mq_rq, card, - disable_multi, mq); - mmc_start_areq(card->host, - &mq_rq->areq, NULL); - mq_rq->brq.retune_retry_done = retune_retry_done; - } - } while (req_pending); - - mq->qcnt--; -} - -void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) -{ - int ret; - struct mmc_blk_data *md = mq->blkdata; - struct mmc_card *card = md->queue.card; - - if (req && !mq->qcnt) - /* claim host only for the first request */ - mmc_get_card(card, NULL); - - ret = mmc_blk_part_switch(card, md->part_type); - if (ret) { - if (req) { - blk_end_request_all(req, BLK_STS_IOERR); - } - goto out; - } - - if (req) { - switch (req_op(req)) { - case REQ_OP_DRV_IN: - case REQ_OP_DRV_OUT: - /* - * Complete ongoing async transfer before issuing - * ioctl()s - */ - if (mq->qcnt) - mmc_blk_issue_rw_rq(mq, NULL); - mmc_blk_issue_drv_op(mq, req); - break; - case REQ_OP_DISCARD: - /* - * Complete ongoing async transfer before issuing - * discard. - */ - if (mq->qcnt) - mmc_blk_issue_rw_rq(mq, NULL); - mmc_blk_issue_discard_rq(mq, req); - break; - case REQ_OP_SECURE_ERASE: - /* - * Complete ongoing async transfer before issuing - * secure erase. - */ - if (mq->qcnt) - mmc_blk_issue_rw_rq(mq, NULL); - mmc_blk_issue_secdiscard_rq(mq, req); - break; - case REQ_OP_FLUSH: - /* - * Complete ongoing async transfer before issuing - * flush. - */ - if (mq->qcnt) - mmc_blk_issue_rw_rq(mq, NULL); - mmc_blk_issue_flush(mq, req); - break; - default: - /* Normal request, just issue it */ - mmc_blk_issue_rw_rq(mq, req); - card->host->context_info.is_waiting_last_req = false; - break; - } - } else { - /* No request, flushing the pipeline with NULL */ - mmc_blk_issue_rw_rq(mq, NULL); - card->host->context_info.is_waiting_last_req = false; - } - -out: - if (!mq->qcnt) - mmc_put_card(card, NULL); -} - static inline int mmc_blk_readonly(struct mmc_card *card) { return mmc_card_readonly(card) || diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h index b126418fd163..31153f656f41 100644 --- a/drivers/mmc/core/block.h +++ b/drivers/mmc/core/block.h @@ -5,8 +5,6 @@ struct mmc_queue; struct request; -void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req); - void mmc_blk_cqe_recovery(struct mmc_queue *mq); enum mmc_issued; diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 5db388081789..421fab7250ac 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -24,22 +24,6 @@ #include "card.h" #include "host.h" -/* - * Prepare a MMC request. This just filters out odd stuff. 
- */ -static int mmc_prep_request(struct request_queue *q, struct request *req) -{ - struct mmc_queue *mq = q->queuedata; - - if (mq && mmc_card_removed(mq->card)) - return BLKPREP_KILL; - - req->rq_flags |= RQF_DONTPREP; - req_to_mmc_queue_req(req)->retries = 0; - - return BLKPREP_OK; -} - static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq) { /* Allow only 1 DCMD at a time */ @@ -181,86 +165,6 @@ static void mmc_mq_recovery_handler(struct work_struct *work) blk_mq_run_hw_queues(q, true); } -static int mmc_queue_thread(void *d) -{ - struct mmc_queue *mq = d; - struct request_queue *q = mq->queue; - struct mmc_context_info *cntx = &mq->card->host->context_info; - - current->flags |= PF_MEMALLOC; - - down(&mq->thread_sem); - do { - struct request *req; - - spin_lock_irq(q->queue_lock); - set_current_state(TASK_INTERRUPTIBLE); - req = blk_fetch_request(q); - mq->asleep = false; - cntx->is_waiting_last_req = false; - cntx->is_new_req = false; - if (!req) { - /* - * Dispatch queue is empty so set flags for - * mmc_request_fn() to wake us up. - */ - if (mq->qcnt) - cntx->is_waiting_last_req = true; - else - mq->asleep = true; - } - spin_unlock_irq(q->queue_lock); - - if (req || mq->qcnt) { - set_current_state(TASK_RUNNING); - mmc_blk_issue_rq(mq, req); - cond_resched(); - } else { - if (kthread_should_stop()) { - set_current_state(TASK_RUNNING); - break; - } - up(&mq->thread_sem); - schedule(); - down(&mq->thread_sem); - } - } while (1); - up(&mq->thread_sem); - - return 0; -} - -/* - * Generic MMC request handler. This is called for any queue on a - * particular host. When the host is not busy, we look for a request - * on any queue on this host, and attempt to issue it. This may - * not be the queue we were asked to process. - */ -static void mmc_request_fn(struct request_queue *q) -{ - struct mmc_queue *mq = q->queuedata; - struct request *req; - struct mmc_context_info *cntx; - - if (!mq) { - while ((req = blk_fetch_request(q)) != NULL) { - req->rq_flags |= RQF_QUIET; - __blk_end_request_all(req, BLK_STS_IOERR); - } - return; - } - - cntx = &mq->card->host->context_info; - - if (cntx->is_waiting_last_req) { - cntx->is_new_req = true; - wake_up_interruptible(&cntx->wait); - } - - if (mq->asleep) - wake_up_process(mq->thread); -} - static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp) { struct scatterlist *sg; @@ -311,12 +215,6 @@ static int __mmc_init_request(struct mmc_queue *mq, struct request *req, return 0; } -static int mmc_init_request(struct request_queue *q, struct request *req, - gfp_t gfp) -{ - return __mmc_init_request(q->queuedata, req, gfp); -} - static void mmc_exit_request(struct request_queue *q, struct request *req) { struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req); @@ -469,9 +367,6 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) blk_queue_max_segments(mq->queue, host->max_segs); blk_queue_max_segment_size(mq->queue, host->max_seg_size); - /* Initialize thread_sem even if it is not used */ - sema_init(&mq->thread_sem, 1); - INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler); INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work); @@ -559,51 +454,15 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock, const char *subname) { struct mmc_host *host = card->host; - int ret = -ENOMEM; mq->card = card; mq->use_cqe = host->cqe_enabled; - if (mq->use_cqe || mmc_host_use_blk_mq(host)) - return mmc_mq_init(mq, card, lock); - - mq->queue = blk_alloc_queue(GFP_KERNEL); - if (!mq->queue) - return 
-ENOMEM; - mq->queue->queue_lock = lock; - mq->queue->request_fn = mmc_request_fn; - mq->queue->init_rq_fn = mmc_init_request; - mq->queue->exit_rq_fn = mmc_exit_request; - mq->queue->cmd_size = sizeof(struct mmc_queue_req); - mq->queue->queuedata = mq; - mq->qcnt = 0; - ret = blk_init_allocated_queue(mq->queue); - if (ret) { - blk_cleanup_queue(mq->queue); - return ret; - } - - blk_queue_prep_rq(mq->queue, mmc_prep_request); - - mmc_setup_queue(mq, card); - - mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s", - host->index, subname ? subname : ""); - - if (IS_ERR(mq->thread)) { - ret = PTR_ERR(mq->thread); - goto cleanup_queue; - } - - return 0; - -cleanup_queue: - blk_cleanup_queue(mq->queue); - return ret; + return mmc_mq_init(mq, card, lock); } -static void mmc_mq_queue_suspend(struct mmc_queue *mq) +void mmc_queue_suspend(struct mmc_queue *mq) { blk_mq_quiesce_queue(mq->queue); @@ -615,71 +474,22 @@ static void mmc_mq_queue_suspend(struct mmc_queue *mq) mmc_release_host(mq->card->host); } -static void mmc_mq_queue_resume(struct mmc_queue *mq) +void mmc_queue_resume(struct mmc_queue *mq) { blk_mq_unquiesce_queue(mq->queue); } -static void __mmc_queue_suspend(struct mmc_queue *mq) -{ - struct request_queue *q = mq->queue; - unsigned long flags; - - if (!mq->suspended) { - mq->suspended |= true; - - spin_lock_irqsave(q->queue_lock, flags); - blk_stop_queue(q); - spin_unlock_irqrestore(q->queue_lock, flags); - - down(&mq->thread_sem); - } -} - -static void __mmc_queue_resume(struct mmc_queue *mq) -{ - struct request_queue *q = mq->queue; - unsigned long flags; - - if (mq->suspended) { - mq->suspended = false; - - up(&mq->thread_sem); - - spin_lock_irqsave(q->queue_lock, flags); - blk_start_queue(q); - spin_unlock_irqrestore(q->queue_lock, flags); - } -} - void mmc_cleanup_queue(struct mmc_queue *mq) { struct request_queue *q = mq->queue; - unsigned long flags; - if (q->mq_ops) { - /* - * The legacy code handled the possibility of being suspended, - * so do that here too. - */ - if (blk_queue_quiesced(q)) - blk_mq_unquiesce_queue(q); - goto out_cleanup; - } + /* + * The legacy code handled the possibility of being suspended, + * so do that here too. + */ + if (blk_queue_quiesced(q)) + blk_mq_unquiesce_queue(q); - /* Make sure the queue isn't suspended, as that will deadlock */ - mmc_queue_resume(mq); - - /* Then terminate our worker thread */ - kthread_stop(mq->thread); - - /* Empty the queue */ - spin_lock_irqsave(q->queue_lock, flags); - q->queuedata = NULL; - blk_start_queue(q); - spin_unlock_irqrestore(q->queue_lock, flags); - -out_cleanup: blk_cleanup_queue(q); /* @@ -692,38 +502,6 @@ out_cleanup: mq->card = NULL; } -/** - * mmc_queue_suspend - suspend a MMC request queue - * @mq: MMC queue to suspend - * - * Stop the block request queue, and wait for our thread to - * complete any outstanding requests. This ensures that we - * won't suspend while a request is being processed. 
- */ -void mmc_queue_suspend(struct mmc_queue *mq) -{ - struct request_queue *q = mq->queue; - - if (q->mq_ops) - mmc_mq_queue_suspend(mq); - else - __mmc_queue_suspend(mq); -} - -/** - * mmc_queue_resume - resume a previously suspended MMC request queue - * @mq: MMC queue to resume - */ -void mmc_queue_resume(struct mmc_queue *mq) -{ - struct request_queue *q = mq->queue; - - if (q->mq_ops) - mmc_mq_queue_resume(mq); - else - __mmc_queue_resume(mq); -} - /* * Prepare the sg list(s) to be handed of to the host driver */ diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index 34f601c6dd39..17e59d50b496 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h @@ -34,7 +34,6 @@ static inline struct request *mmc_queue_req_to_req(struct mmc_queue_req *mqr) return blk_mq_rq_from_pdu(mqr); } -struct task_struct; struct mmc_blk_data; struct mmc_blk_ioc_data; @@ -44,7 +43,6 @@ struct mmc_blk_request { struct mmc_command cmd; struct mmc_command stop; struct mmc_data data; - int retune_retry_done; }; /** @@ -66,7 +64,6 @@ enum mmc_drv_op { struct mmc_queue_req { struct mmc_blk_request brq; struct scatterlist *sg; - struct mmc_async_req areq; enum mmc_drv_op drv_op; int drv_op_result; void *drv_op_data; @@ -76,22 +73,10 @@ struct mmc_queue_req { struct mmc_queue { struct mmc_card *card; - struct task_struct *thread; - struct semaphore thread_sem; struct mmc_ctx ctx; struct blk_mq_tag_set tag_set; - bool suspended; - bool asleep; struct mmc_blk_data *blkdata; struct request_queue *queue; - /* - * FIXME: this counter is not a very reliable way of keeping - * track of how many requests that are ongoing. Switch to just - * letting the block core keep track of requests and per-request - * associated mmc_queue_req data. - */ - int qcnt; - int in_flight[MMC_ISSUE_MAX]; unsigned int cqe_busy; #define MMC_CQE_DCMD_BUSY BIT(0) From 126b62700386da782f83579e9b0431ea76c2da3d Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 29 Nov 2017 15:41:19 +0200 Subject: [PATCH 37/96] mmc: core: Remove code no longer needed after the switch to blk-mq Remove code no longer needed after the switch to blk-mq. 
Signed-off-by: Adrian Hunter Acked-by: Linus Walleij Tested-by: Linus Walleij Signed-off-by: Ulf Hansson --- drivers/mmc/core/bus.c | 2 - drivers/mmc/core/core.c | 183 +-------------------------------------- drivers/mmc/core/core.h | 8 -- drivers/mmc/core/host.h | 5 -- include/linux/mmc/host.h | 3 - 5 files changed, 1 insertion(+), 200 deletions(-) diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 7586ff2ad1f1..fc92c6c1c9a4 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -351,8 +351,6 @@ int mmc_add_card(struct mmc_card *card) #ifdef CONFIG_DEBUG_FS mmc_add_card_debugfs(card); #endif - mmc_init_context_info(card->host); - card->dev.of_node = mmc_of_find_child_device(card->host, 0); device_enable_async_suspend(&card->dev); diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 2a137976107f..fd64e6d425e5 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -363,20 +363,6 @@ int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) } EXPORT_SYMBOL(mmc_start_request); -/* - * mmc_wait_data_done() - done callback for data request - * @mrq: done data request - * - * Wakes up mmc context, passed as a callback to host controller driver - */ -static void mmc_wait_data_done(struct mmc_request *mrq) -{ - struct mmc_context_info *context_info = &mrq->host->context_info; - - context_info->is_done_rcv = true; - wake_up_interruptible(&context_info->wait); -} - static void mmc_wait_done(struct mmc_request *mrq) { complete(&mrq->completion); @@ -394,35 +380,6 @@ static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host) wait_for_completion(&ongoing_mrq->cmd_completion); } -/* - *__mmc_start_data_req() - starts data request - * @host: MMC host to start the request - * @mrq: data request to start - * - * Sets the done callback to be called when request is completed by the card. - * Starts data mmc request execution - * If an ongoing transfer is already in progress, wait for the command line - * to become available before sending another command. - */ -static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq) -{ - int err; - - mmc_wait_ongoing_tfr_cmd(host); - - mrq->done = mmc_wait_data_done; - mrq->host = host; - - err = mmc_start_request(host, mrq); - if (err) { - mrq->cmd->error = err; - mmc_complete_cmd(mrq); - mmc_wait_data_done(mrq); - } - - return err; -} - static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) { int err; @@ -648,132 +605,10 @@ EXPORT_SYMBOL(mmc_cqe_recovery); */ bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq) { - if (host->areq) - return host->context_info.is_done_rcv; - else - return completion_done(&mrq->completion); + return completion_done(&mrq->completion); } EXPORT_SYMBOL(mmc_is_req_done); -/** - * mmc_finalize_areq() - finalize an asynchronous request - * @host: MMC host to finalize any ongoing request on - * - * Returns the status of the ongoing asynchronous request, but - * MMC_BLK_SUCCESS if no request was going on. 
- */ -static enum mmc_blk_status mmc_finalize_areq(struct mmc_host *host) -{ - struct mmc_context_info *context_info = &host->context_info; - enum mmc_blk_status status; - - if (!host->areq) - return MMC_BLK_SUCCESS; - - while (1) { - wait_event_interruptible(context_info->wait, - (context_info->is_done_rcv || - context_info->is_new_req)); - - if (context_info->is_done_rcv) { - struct mmc_command *cmd; - - context_info->is_done_rcv = false; - cmd = host->areq->mrq->cmd; - - if (!cmd->error || !cmd->retries || - mmc_card_removed(host->card)) { - status = host->areq->err_check(host->card, - host->areq); - break; /* return status */ - } else { - mmc_retune_recheck(host); - pr_info("%s: req failed (CMD%u): %d, retrying...\n", - mmc_hostname(host), - cmd->opcode, cmd->error); - cmd->retries--; - cmd->error = 0; - __mmc_start_request(host, host->areq->mrq); - continue; /* wait for done/new event again */ - } - } - - return MMC_BLK_NEW_REQUEST; - } - - mmc_retune_release(host); - - /* - * Check BKOPS urgency for each R1 response - */ - if (host->card && mmc_card_mmc(host->card) && - ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) || - (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) && - (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) { - mmc_start_bkops(host->card, true); - } - - return status; -} - -/** - * mmc_start_areq - start an asynchronous request - * @host: MMC host to start command - * @areq: asynchronous request to start - * @ret_stat: out parameter for status - * - * Start a new MMC custom command request for a host. - * If there is on ongoing async request wait for completion - * of that request and start the new one and return. - * Does not wait for the new request to complete. - * - * Returns the completed request, NULL in case of none completed. - * Wait for the an ongoing request (previoulsy started) to complete and - * return the completed request. If there is no ongoing request, NULL - * is returned without waiting. NULL is not an error condition. - */ -struct mmc_async_req *mmc_start_areq(struct mmc_host *host, - struct mmc_async_req *areq, - enum mmc_blk_status *ret_stat) -{ - enum mmc_blk_status status; - int start_err = 0; - struct mmc_async_req *previous = host->areq; - - /* Prepare a new request */ - if (areq) - mmc_pre_req(host, areq->mrq); - - /* Finalize previous request */ - status = mmc_finalize_areq(host); - if (ret_stat) - *ret_stat = status; - - /* The previous request is still going on... */ - if (status == MMC_BLK_NEW_REQUEST) - return NULL; - - /* Fine so far, start the new request! */ - if (status == MMC_BLK_SUCCESS && areq) - start_err = __mmc_start_data_req(host, areq->mrq); - - /* Postprocess the old request at this point */ - if (host->areq) - mmc_post_req(host, host->areq->mrq, 0); - - /* Cancel a prepared request if it was not started. */ - if ((status != MMC_BLK_SUCCESS || start_err) && areq) - mmc_post_req(host, areq->mrq, -EINVAL); - - if (status != MMC_BLK_SUCCESS) - host->areq = NULL; - else - host->areq = areq; - - return previous; -} -EXPORT_SYMBOL(mmc_start_areq); - /** * mmc_wait_for_req - start a request and wait for completion * @host: MMC host to start command @@ -2961,22 +2796,6 @@ void mmc_unregister_pm_notifier(struct mmc_host *host) } #endif -/** - * mmc_init_context_info() - init synchronization context - * @host: mmc host - * - * Init struct context_info needed to implement asynchronous - * request mechanism, used by mmc core, host driver and mmc requests - * supplier. 
- */ -void mmc_init_context_info(struct mmc_host *host) -{ - host->context_info.is_new_req = false; - host->context_info.is_done_rcv = false; - host->context_info.is_waiting_last_req = false; - init_waitqueue_head(&host->context_info.wait); -} - static int __init mmc_init(void) { int ret; diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 3e3d21304e5f..d6303d69071b 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -89,8 +89,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host); void mmc_add_card_debugfs(struct mmc_card *card); void mmc_remove_card_debugfs(struct mmc_card *card); -void mmc_init_context_info(struct mmc_host *host); - int mmc_execute_tuning(struct mmc_card *card); int mmc_hs200_to_hs400(struct mmc_card *card); int mmc_hs400_to_hs200(struct mmc_card *card); @@ -108,12 +106,6 @@ bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq); int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq); -struct mmc_async_req; - -struct mmc_async_req *mmc_start_areq(struct mmc_host *host, - struct mmc_async_req *areq, - enum mmc_blk_status *ret_stat); - int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, unsigned int arg); int mmc_can_erase(struct mmc_card *card); diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h index 6d896869e5c6..06ec19b5bf9f 100644 --- a/drivers/mmc/core/host.h +++ b/drivers/mmc/core/host.h @@ -79,10 +79,5 @@ static inline bool mmc_card_hs400es(struct mmc_card *card) return card->host->ios.enhanced_strobe; } -static inline bool mmc_host_use_blk_mq(struct mmc_host *host) -{ - return true; -} - #endif diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index f3e13c50f6b0..85146235231e 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -424,9 +424,6 @@ struct mmc_host { struct dentry *debugfs_root; - struct mmc_async_req *areq; /* active async req */ - struct mmc_context_info context_info; /* async synchronization info */ - /* Ongoing data transfer that allows commands during transfer */ struct mmc_request *ongoing_mrq; From 0562315b86372d2cdd9cc8924b92cfab37049fbc Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Fri, 8 Dec 2017 09:31:06 +0200 Subject: [PATCH 38/96] mmc: cqhci: Ensure macro parameters are wrapped in parentheses Absence of parentheses is not affecting current code, but ensure macro parameters are wrapped in parentheses. 
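Illustration (not part of the patch) of why the parentheses matter when a macro argument is an expression; the macro names below are hypothetical stand-ins for the CQHCI field macros:

#define FIELD_OLD(x) (x & 0x7F)     /* unparenthesized parameter */
#define FIELD_NEW(x) ((x) & 0x7F)   /* parameter wrapped in parentheses */

/*
 * FIELD_OLD(0x80 | 0x01) expands to (0x80 | 0x01 & 0x7F) == 0x81,
 * because & binds tighter than |.
 * FIELD_NEW(0x80 | 0x01) expands to ((0x80 | 0x01) & 0x7F) == 0x01,
 * which is the intended masking.
 */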
Reported-by: Dan Carpenter Fixes: a4080225f51d ("mmc: cqhci: support for command queue enabled host") Signed-off-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/cqhci.h | 42 ++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h index 2d39d361b322..9e68286a07b4 100644 --- a/drivers/mmc/host/cqhci.h +++ b/drivers/mmc/host/cqhci.h @@ -61,9 +61,9 @@ #define CQHCI_IC_ENABLE BIT(31) #define CQHCI_IC_RESET BIT(16) #define CQHCI_IC_ICCTHWEN BIT(15) -#define CQHCI_IC_ICCTH(x) ((x & 0x1F) << 8) +#define CQHCI_IC_ICCTH(x) (((x) & 0x1F) << 8) #define CQHCI_IC_ICTOVALWEN BIT(7) -#define CQHCI_IC_ICTOVAL(x) (x & 0x7F) +#define CQHCI_IC_ICTOVAL(x) ((x) & 0x7F) /* task list base address */ #define CQHCI_TDLBA 0x20 @@ -119,31 +119,31 @@ #define CQHCI_IC_DEFAULT_ICTOVAL 1 /* attribute fields */ -#define CQHCI_VALID(x) ((x & 1) << 0) -#define CQHCI_END(x) ((x & 1) << 1) -#define CQHCI_INT(x) ((x & 1) << 2) -#define CQHCI_ACT(x) ((x & 0x7) << 3) +#define CQHCI_VALID(x) (((x) & 1) << 0) +#define CQHCI_END(x) (((x) & 1) << 1) +#define CQHCI_INT(x) (((x) & 1) << 2) +#define CQHCI_ACT(x) (((x) & 0x7) << 3) /* data command task descriptor fields */ -#define CQHCI_FORCED_PROG(x) ((x & 1) << 6) -#define CQHCI_CONTEXT(x) ((x & 0xF) << 7) -#define CQHCI_DATA_TAG(x) ((x & 1) << 11) -#define CQHCI_DATA_DIR(x) ((x & 1) << 12) -#define CQHCI_PRIORITY(x) ((x & 1) << 13) -#define CQHCI_QBAR(x) ((x & 1) << 14) -#define CQHCI_REL_WRITE(x) ((x & 1) << 15) -#define CQHCI_BLK_COUNT(x) ((x & 0xFFFF) << 16) -#define CQHCI_BLK_ADDR(x) ((x & 0xFFFFFFFF) << 32) +#define CQHCI_FORCED_PROG(x) (((x) & 1) << 6) +#define CQHCI_CONTEXT(x) (((x) & 0xF) << 7) +#define CQHCI_DATA_TAG(x) (((x) & 1) << 11) +#define CQHCI_DATA_DIR(x) (((x) & 1) << 12) +#define CQHCI_PRIORITY(x) (((x) & 1) << 13) +#define CQHCI_QBAR(x) (((x) & 1) << 14) +#define CQHCI_REL_WRITE(x) (((x) & 1) << 15) +#define CQHCI_BLK_COUNT(x) (((x) & 0xFFFF) << 16) +#define CQHCI_BLK_ADDR(x) (((x) & 0xFFFFFFFF) << 32) /* direct command task descriptor fields */ -#define CQHCI_CMD_INDEX(x) ((x & 0x3F) << 16) -#define CQHCI_CMD_TIMING(x) ((x & 1) << 22) -#define CQHCI_RESP_TYPE(x) ((x & 0x3) << 23) +#define CQHCI_CMD_INDEX(x) (((x) & 0x3F) << 16) +#define CQHCI_CMD_TIMING(x) (((x) & 1) << 22) +#define CQHCI_RESP_TYPE(x) (((x) & 0x3) << 23) /* transfer descriptor fields */ -#define CQHCI_DAT_LENGTH(x) ((x & 0xFFFF) << 16) -#define CQHCI_DAT_ADDR_LO(x) ((x & 0xFFFFFFFF) << 32) -#define CQHCI_DAT_ADDR_HI(x) ((x & 0xFFFFFFFF) << 0) +#define CQHCI_DAT_LENGTH(x) (((x) & 0xFFFF) << 16) +#define CQHCI_DAT_ADDR_LO(x) (((x) & 0xFFFFFFFF) << 32) +#define CQHCI_DAT_ADDR_HI(x) (((x) & 0xFFFFFFFF) << 0) struct cqhci_host_ops; struct mmc_host; From 2361bfb055f948eac6583fa3c75a014da84fe554 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 8 Dec 2017 14:55:16 +0300 Subject: [PATCH 39/96] mmc: block: blk-mq: Potential NULL deref on mmc_blk_alloc_req() failure mmc_blk_alloc_req() is supposed to return error pointers but there is one path where we forget to set the error code and accidentally return NULL. The callers are not expecting that and will have a NULL pointer dereference. 
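Illustration (not part of the patch): callers test the result with IS_ERR(), so an error path that returns NULL instead of ERR_PTR(-errno) sails past that check and is later dereferenced. A simplified sketch of the convention, with hypothetical names:

static struct mmc_blk_data *example_alloc(struct mmc_card *card)
{
	struct mmc_blk_data *md;
	int ret;

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md)
		return ERR_PTR(-ENOMEM);

	if (!example_setup_queue(md)) {	/* hypothetical helper that can fail */
		ret = -ENODEV;		/* the assignment this patch adds */
		goto err_free;
	}

	return md;

err_free:
	kfree(md);
	return ERR_PTR(ret);		/* ret must be set on every error path */
}

/* Callers then do: md = example_alloc(card); if (IS_ERR(md)) return PTR_ERR(md); */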
Fixes: 41e3efd07d5a ("mmc: block: Simplify cleaning up the queue") Signed-off-by: Dan Carpenter Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/core/block.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 579fc0bd722f..654fc1ebd675 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2328,6 +2328,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, */ if (!blk_get_queue(md->queue.queue)) { mmc_cleanup_queue(&md->queue); + ret = -ENODEV; goto err_putdisk; } From 0cc1a0f4519e26d6498bd85c6e648b21a6cdd3ea Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Fri, 8 Dec 2017 15:04:58 +0200 Subject: [PATCH 40/96] mmc: sdhci-acpi: Add setup_host() callback Add a ->setup_host() callback so that device-specific changes can be made to the mmc host controller before it is added. Signed-off-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-acpi.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 1b1ce804d2d7..f7445cf8f7dd 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -76,6 +76,7 @@ struct sdhci_acpi_slot { size_t priv_size; int (*probe_slot)(struct platform_device *, const char *, const char *); int (*remove_slot)(struct platform_device *); + int (*setup_host)(struct platform_device *pdev); }; struct sdhci_acpi_host { @@ -688,10 +689,20 @@ static int sdhci_acpi_probe(struct platform_device *pdev) } } - err = sdhci_add_host(host); + err = sdhci_setup_host(host); if (err) goto err_free; + if (c->slot && c->slot->setup_host) { + err = c->slot->setup_host(pdev); + if (err) + goto err_cleanup; + } + + err = __sdhci_add_host(host); + if (err) + goto err_cleanup; + if (c->use_runtime_pm) { pm_runtime_set_active(dev); pm_suspend_ignore_children(dev, 1); @@ -704,6 +715,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev) return 0; +err_cleanup: + sdhci_cleanup_host(c->host); err_free: sdhci_free_host(c->host); return err; From 0acccf4141a1ac37edab8ed905e97bf7c4be3bce Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Fri, 8 Dec 2017 15:08:18 +0200 Subject: [PATCH 41/96] mmc: sdhci-acpi: Avoid broken UHS transfer modes on Intel CHT Intel DSM function 8 has been used to identify transfer modes that are not working on some CHT boards. Add support for that. 
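Illustration (not part of the patch): with the ->setup_host() callback from the previous patch, the probe path can drop unsupported UHS modes after sdhci_setup_host() has populated the capabilities but before the host is added. Roughly, with error handling trimmed:

	err = sdhci_setup_host(host);			/* reads caps, prepares the mmc host */
	if (!err && c->slot && c->slot->setup_host)
		err = c->slot->setup_host(pdev);	/* device-specific tweaks, e.g. mask MMC_CAP_UHS_* bits */
	if (!err)
		err = __sdhci_add_host(host);		/* the host becomes visible only now */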
Signed-off-by: Adrian Hunter Tested-by: Carlo Caione Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-acpi.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index f7445cf8f7dd..264f10327bf9 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -97,14 +97,21 @@ static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag) return c->slot && (c->slot->flags & flag); } +#define INTEL_DSM_HS_CAPS_SDR25 BIT(0) +#define INTEL_DSM_HS_CAPS_DDR50 BIT(1) +#define INTEL_DSM_HS_CAPS_SDR50 BIT(2) +#define INTEL_DSM_HS_CAPS_SDR104 BIT(3) + enum { INTEL_DSM_FNS = 0, INTEL_DSM_V18_SWITCH = 3, INTEL_DSM_V33_SWITCH = 4, + INTEL_DSM_HS_CAPS = 8, }; struct intel_host { u32 dsm_fns; + u32 hs_caps; }; static const guid_t intel_dsm_guid = @@ -153,6 +160,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev, { int err; + intel_host->hs_caps = ~0; + err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); if (err) { pr_debug("%s: DSM not supported, error %d\n", @@ -162,6 +171,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev, pr_debug("%s: DSM function mask %#x\n", mmc_hostname(mmc), intel_host->dsm_fns); + + intel_dsm(intel_host, dev, INTEL_DSM_HS_CAPS, &intel_host->hs_caps); } static int intel_start_signal_voltage_switch(struct mmc_host *mmc, @@ -399,6 +410,26 @@ static int intel_probe_slot(struct platform_device *pdev, const char *hid, return 0; } +static int intel_setup_host(struct platform_device *pdev) +{ + struct sdhci_acpi_host *c = platform_get_drvdata(pdev); + struct intel_host *intel_host = sdhci_acpi_priv(c); + + if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR25)) + c->host->mmc->caps &= ~MMC_CAP_UHS_SDR25; + + if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR50)) + c->host->mmc->caps &= ~MMC_CAP_UHS_SDR50; + + if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_DDR50)) + c->host->mmc->caps &= ~MMC_CAP_UHS_DDR50; + + if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR104)) + c->host->mmc->caps &= ~MMC_CAP_UHS_SDR104; + + return 0; +} + static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = { .chip = &sdhci_acpi_chip_int, .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | @@ -410,6 +441,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = { SDHCI_QUIRK2_STOP_WITH_TC | SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400, .probe_slot = intel_probe_slot, + .setup_host = intel_setup_host, .priv_size = sizeof(struct intel_host), }; @@ -422,6 +454,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { .flags = SDHCI_ACPI_RUNTIME_PM, .pm_caps = MMC_PM_KEEP_POWER, .probe_slot = intel_probe_slot, + .setup_host = intel_setup_host, .priv_size = sizeof(struct intel_host), }; @@ -433,6 +466,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = { SDHCI_QUIRK2_STOP_WITH_TC, .caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM, .probe_slot = intel_probe_slot, + .setup_host = intel_setup_host, .priv_size = sizeof(struct intel_host), }; From 6b79e77c92a3ab2417cd97dcd5ac981af967e6ad Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Thu, 7 Dec 2017 14:43:22 +0800 Subject: [PATCH 42/96] mmc: dt-bindings: add mmc support to MT7623 SoC Add the devicetree binding for MT7623 SoC using MT2701 as the fallback. 
Cc: devicetree@vger.kernel.org Signed-off-by: Sean Wang Acked-by: Rob Herring Signed-off-by: Ulf Hansson --- Documentation/devicetree/bindings/mmc/mtk-sd.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.txt b/Documentation/devicetree/bindings/mmc/mtk-sd.txt index 72d2a734ab85..9b8017670870 100644 --- a/Documentation/devicetree/bindings/mmc/mtk-sd.txt +++ b/Documentation/devicetree/bindings/mmc/mtk-sd.txt @@ -12,6 +12,8 @@ Required properties: "mediatek,mt8173-mmc": for mmc host ip compatible with mt8173 "mediatek,mt2701-mmc": for mmc host ip compatible with mt2701 "mediatek,mt2712-mmc": for mmc host ip compatible with mt2712 + "mediatek,mt7623-mmc", "mediatek,mt2701-mmc": for MT7623 SoC + - reg: physical base address of the controller and length - interrupts: Should contain MSDC interrupt number - clocks: Should contain phandle for the clock feeding the MMC controller
From cbcaac6d7dd209f2077480f4297e131c8b90d223 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 19 Nov 2017 10:22:43 +0530 Subject: [PATCH 43/96] mmc: meson-gx-mmc: Fix platform_get_irq's error checking The platform_get_irq() function returns a negative value if an error occurs, and zero or a positive number on success. Checking its return value only against zero is therefore not a correct error check. Signed-off-by: Arvind Yadav Acked-by: Kevin Hilman Signed-off-by: Ulf Hansson --- drivers/mmc/host/meson-gx-mmc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index e0862d3f65b3..32a6a228cd12 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -1208,7 +1208,7 @@ static int meson_mmc_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (!irq) { + if (irq <= 0) { dev_err(&pdev->dev, "failed to get interrupt resource.\n"); ret = -EINVAL; goto free_host;
From 928635c114adefc58aeb1a9f9615cd9d3c24e3e4 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 19 Nov 2017 10:22:44 +0530 Subject: [PATCH 44/96] mmc: s3cmci: Fix platform_get_irq's error checking The platform_get_irq() function returns a negative value if an error occurs, and zero or a positive number on success. Checking its return value only against zero is therefore not a correct error check. Signed-off-by: Arvind Yadav Signed-off-by: Ulf Hansson --- drivers/mmc/host/s3cmci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index f7f157a62a4a..36daee1e6588 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1658,7 +1658,7 @@ static int s3cmci_probe(struct platform_device *pdev) } host->irq = platform_get_irq(pdev, 0); - if (host->irq == 0) { + if (host->irq <= 0) { dev_err(&pdev->dev, "failed to get interrupt resource.\n"); ret = -EINVAL; goto probe_iounmap;
From 1b7ba57ecc864173ef42fff7f8c2e9a880b42bd2 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 19 Nov 2017 10:22:45 +0530 Subject: [PATCH 45/96] mmc: sdhci-acpi: Handle return value of platform_get_irq platform_get_irq() can fail here and we must check its return value.
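Illustration (not part of the patches): the corrected check that these drivers converge on, written as a hypothetical helper:

#include <linux/platform_device.h>

static int example_get_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	/* Negative is an error code; 0 is not a usable IRQ for these drivers. */
	if (irq <= 0) {
		dev_err(&pdev->dev, "failed to get interrupt resource\n");
		return -EINVAL;
	}

	return irq;
}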
Signed-off-by: Arvind Yadav Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-acpi.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 264f10327bf9..4065da58789d 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -680,6 +680,10 @@ static int sdhci_acpi_probe(struct platform_device *pdev) host->hw_name = "ACPI"; host->ops = &sdhci_acpi_ops_dflt; host->irq = platform_get_irq(pdev, 0); + if (host->irq <= 0) { + err = -EINVAL; + goto err_free; + } host->ioaddr = devm_ioremap_nocache(dev, iomem->start, resource_size(iomem)); From 682798a596a6f3ffe796ebbf6a9396bed6dc6de2 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 19 Nov 2017 10:22:46 +0530 Subject: [PATCH 46/96] mmc: sdhci-spear: Handle return value of platform_get_irq platform_get_irq() can fail here and we must check its return value. Signed-off-by: Arvind Yadav Acked-by: Viresh Kumar Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-spear.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index 8c0f88428556..14511526a3a8 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c @@ -82,6 +82,10 @@ static int sdhci_probe(struct platform_device *pdev) host->hw_name = "sdhci"; host->ops = &sdhci_pltfm_ops; host->irq = platform_get_irq(pdev, 0); + if (host->irq <= 0) { + ret = -EINVAL; + goto err_host; + } host->quirks = SDHCI_QUIRK_BROKEN_ADMA; sdhci = sdhci_priv(host); From 2408a08583d2711f43716b79cb879df66ad407dc Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 19 Nov 2017 10:22:47 +0530 Subject: [PATCH 47/96] mmc: sunxi-mmc: Handle return value of platform_get_irq platform_get_irq() can fail here and we must check its return value. Signed-off-by: Arvind Yadav Signed-off-by: Ulf Hansson --- drivers/mmc/host/sunxi-mmc.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 8fef5c17696e..bad612d6f879 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1255,6 +1255,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, goto error_assert_reset; host->irq = platform_get_irq(pdev, 0); + if (host->irq <= 0) { + ret = -EINVAL; + goto error_assert_reset; + } + return devm_request_threaded_irq(&pdev->dev, host->irq, sunxi_mmc_irq, sunxi_mmc_handle_manual_stop, 0, "sunxi-mmc", host); From c813e10a6bbad9ef56bc115c64d48c5a7d0a7dd5 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 25 Nov 2017 01:24:36 +0900 Subject: [PATCH 48/96] mmc: renesas_sdhi: consolidate DMAC CONFIG options The description in the Makefile is odd. Fix the CONFIG selection in a cleaner way. 
Signed-off-by: Masahiro Yamada Acked-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/Kconfig | 4 ++-- drivers/mmc/host/Makefile | 8 ++------ 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 2b02a9788bb6..d63a6ba47501 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -599,8 +599,6 @@ config MMC_SDHI depends on SUPERH || ARM || ARM64 depends on SUPERH || ARCH_RENESAS || COMPILE_TEST select MMC_TMIO_CORE - select MMC_SDHI_SYS_DMAC if (SUPERH || ARM) - select MMC_SDHI_INTERNAL_DMAC if ARM64 help This provides support for the SDHI SD/SDIO controller found in Renesas SuperH, ARM and ARM64 based SoCs @@ -608,6 +606,7 @@ config MMC_SDHI config MMC_SDHI_SYS_DMAC tristate "DMA for SDHI SD/SDIO controllers using SYS-DMAC" depends on MMC_SDHI + default MMC_SDHI if (SUPERH || ARM) help This provides DMA support for SDHI SD/SDIO controllers using SYS-DMAC via DMA Engine. This supports the controllers @@ -617,6 +616,7 @@ config MMC_SDHI_INTERNAL_DMAC tristate "DMA for SDHI SD/SDIO controllers using on-chip bus mastering" depends on ARM64 || COMPILE_TEST depends on MMC_SDHI + default MMC_SDHI if ARM64 help This provides DMA support for SDHI SD/SDIO controllers using on-chip bus mastering. This supports the controllers diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 407a011026cd..191a04010205 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -39,12 +39,8 @@ obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_core.o -ifeq ($(subst m,y,$(CONFIG_MMC_SDHI_SYS_DMAC)),y) -obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_sys_dmac.o -endif -ifeq ($(subst m,y,$(CONFIG_MMC_SDHI_INTERNAL_DMAC)),y) -obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_internal_dmac.o -endif +obj-$(CONFIG_MMC_SDHI_SYS_DMAC) += renesas_sdhi_sys_dmac.o +obj-$(CONFIG_MMC_SDHI_INTERNAL_DMAC) += renesas_sdhi_internal_dmac.o obj-$(CONFIG_MMC_CB710) += cb710-mmc.o obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o From 0c36fc0dfb4c0fa068d077b9e2806ef87d0221a7 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Mon, 18 Dec 2017 01:00:21 +0100 Subject: [PATCH 49/96] mmc: tmio: use ioread* for repeated access to a register Not all archs define reads* and writes*. Switch to ioread*_rep and friends which is defined everywhere, so we can enable COMPILE_TEST after that. 
Signed-off-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/tmio_mmc.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 3e6ff8921440..cd3d7c8d24bf 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -246,7 +246,7 @@ static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, u16 *buf, int count) { - readsw(host->ctl + (addr << host->bus_shift), buf, count); + ioread16_rep(host->ctl + (addr << host->bus_shift), buf, count); } static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host, @@ -259,7 +259,7 @@ static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host, static inline void sd_ctrl_read32_rep(struct tmio_mmc_host *host, int addr, u32 *buf, int count) { - readsl(host->ctl + (addr << host->bus_shift), buf, count); + ioread32_rep(host->ctl + (addr << host->bus_shift), buf, count); } static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, @@ -276,7 +276,7 @@ static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, u16 *buf, int count) { - writesw(host->ctl + (addr << host->bus_shift), buf, count); + iowrite16_rep(host->ctl + (addr << host->bus_shift), buf, count); } static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, @@ -289,7 +289,7 @@ static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr, const u32 *buf, int count) { - writesl(host->ctl + (addr << host->bus_shift), buf, count); + iowrite32_rep(host->ctl + (addr << host->bus_shift), buf, count); } #endif From e578afab6e5f57e7ed22a42d261942b4ac923ffd Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 25 Nov 2017 01:24:37 +0900 Subject: [PATCH 50/96] mmc: renesas_sdhi: remove wrong depends on to enable compile test ARCH_RENESAS is a stronger condition than (ARM || ARM64). If ARCH_RENESAS is enabled, (ARM || ARM64) is met as well. What is worse, the first depends on line prevents COMPILE_TEST from enabling this driver. It should be removed. Signed-off-by: Masahiro Yamada Acked-by: Wolfram Sang Reviewed-by: Geert Uytterhoeven Signed-off-by: Ulf Hansson --- drivers/mmc/host/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index d63a6ba47501..cc4fd07735a7 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -596,7 +596,6 @@ config MMC_TMIO config MMC_SDHI tristate "Renesas SDHI SD/SDIO controller support" - depends on SUPERH || ARM || ARM64 depends on SUPERH || ARCH_RENESAS || COMPILE_TEST select MMC_TMIO_CORE help From 4ce6281791676c134d3ae919edaf76da3cef1d76 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 25 Nov 2017 01:24:38 +0900 Subject: [PATCH 51/96] mmc: renesas_sdhi: remove eprobe jump label "goto eprobe" does nothing. Return directly. 
Signed-off-by: Masahiro Yamada Reviewed-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/renesas_sdhi_core.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index fcf7235d5742..0590ae06cd7b 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -497,7 +497,7 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (IS_ERR(priv->clk)) { ret = PTR_ERR(priv->clk); dev_err(&pdev->dev, "cannot get clock: %d\n", ret); - goto eprobe; + return ret; } /* @@ -524,10 +524,8 @@ int renesas_sdhi_probe(struct platform_device *pdev, } host = tmio_mmc_host_alloc(pdev); - if (!host) { - ret = -ENOMEM; - goto eprobe; - } + if (!host) + return -ENOMEM; if (of_data) { mmc_data->flags |= of_data->tmio_flags; @@ -652,7 +650,7 @@ eirq: tmio_mmc_host_remove(host); efree: tmio_mmc_host_free(host); -eprobe: + return ret; } EXPORT_SYMBOL_GPL(renesas_sdhi_probe); From a3b05373e0e06dcb04adf2c50b58cd3feb5f8294 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 25 Nov 2017 01:24:39 +0900 Subject: [PATCH 52/96] mmc: tmio: set tmio_mmc_host to driver data The remove, suspend, resume hooks need to get tmio_mmc_host. It is tedious to call mmc_priv() to convert mmc_host to tmio_mmc_host. We can directly set tmio_mmc_host to driver data. Signed-off-by: Masahiro Yamada Acked-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/renesas_sdhi_core.c | 3 +-- drivers/mmc/host/tmio_mmc.c | 12 ++++-------- drivers/mmc/host/tmio_mmc_core.c | 8 +++----- 3 files changed, 8 insertions(+), 15 deletions(-) diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 0590ae06cd7b..9baf4d1791ea 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -657,8 +657,7 @@ EXPORT_SYMBOL_GPL(renesas_sdhi_probe); int renesas_sdhi_remove(struct platform_device *pdev) { - struct mmc_host *mmc = platform_get_drvdata(pdev); - struct tmio_mmc_host *host = mmc_priv(mmc); + struct tmio_mmc_host *host = platform_get_drvdata(pdev); tmio_mmc_host_remove(host); diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index 64b7e9f18361..ccfbc154ee5b 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -128,15 +128,11 @@ out: static int tmio_mmc_remove(struct platform_device *pdev) { const struct mfd_cell *cell = mfd_get_cell(pdev); - struct mmc_host *mmc = platform_get_drvdata(pdev); + struct tmio_mmc_host *host = platform_get_drvdata(pdev); - if (mmc) { - struct tmio_mmc_host *host = mmc_priv(mmc); - - tmio_mmc_host_remove(host); - if (cell->disable) - cell->disable(pdev); - } + tmio_mmc_host_remove(host); + if (cell->disable) + cell->disable(pdev); return 0; } diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index d6ca57be16c2..8787a996f6a5 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1193,7 +1193,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, return ret; _host->pdata = pdata; - platform_set_drvdata(pdev, mmc); + platform_set_drvdata(pdev, _host); _host->set_pwr = pdata->set_pwr; _host->set_clk_div = pdata->set_clk_div; @@ -1351,8 +1351,7 @@ EXPORT_SYMBOL_GPL(tmio_mmc_host_remove); #ifdef CONFIG_PM int tmio_mmc_host_runtime_suspend(struct device *dev) { - struct mmc_host *mmc = dev_get_drvdata(dev); - struct tmio_mmc_host *host = mmc_priv(mmc); + struct tmio_mmc_host *host = 
dev_get_drvdata(dev); tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); @@ -1372,8 +1371,7 @@ static bool tmio_mmc_can_retune(struct tmio_mmc_host *host) int tmio_mmc_host_runtime_resume(struct device *dev) { - struct mmc_host *mmc = dev_get_drvdata(dev); - struct tmio_mmc_host *host = mmc_priv(mmc); + struct tmio_mmc_host *host = dev_get_drvdata(dev); tmio_mmc_reset(host); tmio_mmc_clk_enable(host); From 4139696b7978d57ec840b6c9293d4709a46af3bd Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 25 Nov 2017 01:24:40 +0900 Subject: [PATCH 53/96] mmc: tmio: use devm_ioremap_resource() instead of devm_ioremap() The TMIO core misses to call request_mem_region(). devm_ioremap_resource() takes care of it and makes the code cleaner. Signed-off-by: Masahiro Yamada Reviewed-by: Wolfram Sang Tested-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/tmio_mmc_core.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 8787a996f6a5..151a542b75d5 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1185,8 +1185,9 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, _host->write16_hook = NULL; res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res_ctl) - return -EINVAL; + _host->ctl = devm_ioremap_resource(&pdev->dev, res_ctl); + if (IS_ERR(_host->ctl)) + return PTR_ERR(_host->ctl); ret = mmc_of_parse(mmc); if (ret < 0) @@ -1202,11 +1203,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, if (ret < 0) return ret; - _host->ctl = devm_ioremap(&pdev->dev, - res_ctl->start, resource_size(res_ctl)); - if (!_host->ctl) - return -ENOMEM; - tmio_mmc_ops.card_busy = _host->card_busy; tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch; From c055fc75c1757b220108489038cfe60496b13865 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 25 Nov 2017 01:24:41 +0900 Subject: [PATCH 54/96] mmc: tmio: move mmc_host_ops to struct tmio_mmc_host from static data Currently, tmio_mmc_ops is static data and tmio_mmc_host_probe() updates some hooks in the static data. This is a problem when two or more instances call tmio_mmc_host_probe() and each of them requests to use its own card_busy/start_signal_voltage_switch. We can borrow a solution from sdhci_alloc_host(). Copy the whole ops structure to host->mmc_host_ops, then override the hooks in malloc'ed data. Constify tmio_mmc_ops since it is now a template ops used by default. 
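The borrowed pattern, in a trimmed sketch (not the full driver code):

	static const struct mmc_host_ops tmio_mmc_ops = {
		.request	= tmio_mmc_request,
		.set_ios	= tmio_mmc_set_ios,
		/* ... */
	};

	/* in tmio_mmc_host_alloc(): give every instance its own writable copy */
	host->ops = tmio_mmc_ops;
	mmc->ops = &host->ops;

Each instance can then override individual hooks in host->ops without touching the shared const template.
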
Signed-off-by: Masahiro Yamada Reviewed-by: Wolfram Sang Tested-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/tmio_mmc.h | 1 + drivers/mmc/host/tmio_mmc_core.c | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index cd3d7c8d24bf..405547f88421 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -134,6 +134,7 @@ struct tmio_mmc_host { struct mmc_request *mrq; struct mmc_data *data; struct mmc_host *mmc; + struct mmc_host_ops ops; /* Callbacks for clock / power control */ void (*set_pwr)(struct platform_device *host, int state); diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 151a542b75d5..ea5181d7948d 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1098,7 +1098,7 @@ static int tmio_multi_io_quirk(struct mmc_card *card, return blk_size; } -static struct mmc_host_ops tmio_mmc_ops = { +static const struct mmc_host_ops tmio_mmc_ops = { .request = tmio_mmc_request, .set_ios = tmio_mmc_set_ios, .get_ro = tmio_mmc_get_ro, @@ -1158,6 +1158,8 @@ tmio_mmc_host_alloc(struct platform_device *pdev) host = mmc_priv(mmc); host->mmc = mmc; host->pdev = pdev; + host->ops = tmio_mmc_ops; + mmc->ops = &host->ops; return host; } @@ -1203,10 +1205,9 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, if (ret < 0) return ret; - tmio_mmc_ops.card_busy = _host->card_busy; - tmio_mmc_ops.start_signal_voltage_switch = + _host->ops.card_busy = _host->card_busy; + _host->ops.start_signal_voltage_switch = _host->start_signal_voltage_switch; - mmc->ops = &tmio_mmc_ops; mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities; mmc->caps2 |= pdata->capabilities2; From 2aaa3c5193db9cdfe62201aa4eb4e1007a43fdc8 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 25 Nov 2017 01:24:42 +0900 Subject: [PATCH 55/96] mmc: tmio, renesas_sdhi: set mmc_host_ops hooks directly Drivers can set any mmc_host_ops hooks between tmio_mmc_host_alloc() and tmio_mmc_host_probe(). Remove duplicated hooks in tmio_mmc_host. 
Signed-off-by: Masahiro Yamada Reviewed-by: Wolfram Sang Tested-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/renesas_sdhi_core.c | 4 ++-- drivers/mmc/host/tmio_mmc.h | 3 --- drivers/mmc/host/tmio_mmc_core.c | 4 ---- 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 9baf4d1791ea..267e2e0077e9 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -549,8 +549,8 @@ int renesas_sdhi_probe(struct platform_device *pdev, /* SDR speeds are only available on Gen2+ */ if (mmc_data->flags & TMIO_MMC_MIN_RCAR2) { /* card_busy caused issues on r8a73a4 (pre-Gen2) CD-less SDHI */ - host->card_busy = renesas_sdhi_card_busy; - host->start_signal_voltage_switch = + host->ops.card_busy = renesas_sdhi_card_busy; + host->ops.start_signal_voltage_switch = renesas_sdhi_start_signal_voltage_switch; } diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 405547f88421..dd40b9631b3a 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -186,9 +186,6 @@ struct tmio_mmc_host { void (*clk_disable)(struct tmio_mmc_host *host); int (*multi_io_quirk)(struct mmc_card *card, unsigned int direction, int blk_size); - int (*card_busy)(struct mmc_host *mmc); - int (*start_signal_voltage_switch)(struct mmc_host *mmc, - struct mmc_ios *ios); int (*write16_hook)(struct tmio_mmc_host *host, int addr); void (*hw_reset)(struct tmio_mmc_host *host); void (*prepare_tuning)(struct tmio_mmc_host *host, unsigned long tap); diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index ea5181d7948d..1abe83b9f568 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1205,10 +1205,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, if (ret < 0) return ret; - _host->ops.card_busy = _host->card_busy; - _host->ops.start_signal_voltage_switch = - _host->start_signal_voltage_switch; - mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities; mmc->caps2 |= pdata->capabilities2; mmc->max_segs = pdata->max_segs ? : 32; From cd82cd213bfa532ca368e4333ba6a0f14185ef9c Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 25 Nov 2017 01:24:43 +0900 Subject: [PATCH 56/96] mmc: tmio: move mmc_gpio_request_cd() before mmc_add_host() Drivers do not need to call mmc_gpiod_request_cd_irq() explicitly because mmc_start_host() calls it. To make it work, cd_gpio must be set before mmc_add_host(). Signed-off-by: Masahiro Yamada Reviewed-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/tmio_mmc_core.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 1abe83b9f568..6b18c0509ff4 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1205,6 +1205,12 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, if (ret < 0) return ret; + if (pdata->flags & TMIO_MMC_USE_GPIO_CD) { + ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0); + if (ret) + return ret; + } + mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities; mmc->caps2 |= pdata->capabilities2; mmc->max_segs = pdata->max_segs ? 
: 32; @@ -1300,14 +1306,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, dev_pm_qos_expose_latency_limit(&pdev->dev, 100); - if (pdata->flags & TMIO_MMC_USE_GPIO_CD) { - ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0); - if (ret) - goto remove_host; - - mmc_gpiod_request_cd_irq(mmc); - } - return 0; remove_host: From b4fcb5e5094b5f13e5c9aadea82cdbd1a4f0dd02 Mon Sep 17 00:00:00 2001 From: Pravin Shedge Date: Wed, 6 Dec 2017 22:27:03 +0530 Subject: [PATCH 57/96] mmc: android-goldfish: remove duplicate includes These duplicate includes have been found with scripts/checkincludes.pl but they have been removed manually to avoid removing false positives. Signed-off-by: Pravin Shedge Signed-off-by: Ulf Hansson --- drivers/mmc/host/android-goldfish.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c index 63fe5091ca59..63d27589cd89 100644 --- a/drivers/mmc/host/android-goldfish.c +++ b/drivers/mmc/host/android-goldfish.c @@ -42,13 +42,11 @@ #include #include #include -#include #include #include #include -#include #include #define DRIVER_NAME "goldfish_mmc" From de8dcc3d2c0e08e5068ee1e26fc46415c15e3637 Mon Sep 17 00:00:00 2001 From: Daniel Drake Date: Tue, 12 Dec 2017 10:49:02 +0000 Subject: [PATCH 58/96] mmc: avoid removing non-removable hosts during suspend The Weibu F3C MiniPC has an onboard AP6255 module, presenting two SDIO functions on a single MMC host (Bluetooth/btsdio and WiFi/brcmfmac), and the mmc layer correctly detects this as non-removable. After suspend/resume, the wifi and bluetooth interfaces disappear and do not get probed again. The conditions here are: 1. During suspend, we reach mmc_pm_notify() 2. mmc_pm_notify() calls mmc_sdio_pre_suspend() to see if we can suspend the SDIO host. However, mmc_sdio_pre_suspend() returns -ENOSYS because btsdio_driver does not have a suspend method. 3. mmc_pm_notify() proceeds to remove the card 4. Upon resume, mmc_rescan() does nothing with this host, because of the rescan_entered check which aims to only scan a non-removable device a single time (i.e. during boot). Fix the loss of functionality by detecting that we are unable to suspend a non-removable host, so avoid the forced removal in that case. The comment above this function already indicates that this code was only intended for removable devices. Signed-off-by: Daniel Drake Signed-off-by: Ulf Hansson --- drivers/mmc/core/core.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index fd64e6d425e5..c0ba6d8823b7 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -2761,6 +2761,14 @@ static int mmc_pm_notify(struct notifier_block *notify_block, if (!err) break; + if (!mmc_card_is_removable(host)) { + dev_warn(mmc_dev(host), + "pre_suspend failed for non-removable host: " + "%d\n", err); + /* Avoid removing non-removable hosts */ + break; + } + /* Calling bus_ops->remove() with a claimed host can deadlock */ host->bus_ops->remove(host); mmc_claim_host(host); From 0be55579a127916ebe39db2a74d906a2dfceed42 Mon Sep 17 00:00:00 2001 From: "Liu, Changcheng" Date: Sat, 16 Dec 2017 23:15:45 +0800 Subject: [PATCH 59/96] mmc: block: fix logical error to avoid memory leak If the MMC_DRV_OP_GET_EXT_CSD request completes successfully, then ext_csd must be freed, but in one case it was not. Fix that. 
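The rule restored below, in condensed form (a sketch of the failing path in mmc_ext_csd_open(); unrelated details elided):

	/* ext_csd was successfully filled in by the GET_EXT_CSD request */
	if (n != EXT_CSD_STR_LEN) {
		err = -EINVAL;
		kfree(ext_csd);		/* the release that was missing on this error path */
		goto out_free;
	}
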
Signed-off-by: Liu Changcheng Acked-by: Adrian Hunter Acked-by: Linus Walleij Signed-off-by: Ulf Hansson --- drivers/mmc/core/block.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 654fc1ebd675..20135a5de748 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2795,6 +2795,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp) if (n != EXT_CSD_STR_LEN) { err = -EINVAL; + kfree(ext_csd); goto out_free; } From 2487e7efc9e13bd11fdc86f1ac12a5a45c4af778 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 25 Nov 2017 01:24:46 +0900 Subject: [PATCH 60/96] mmc: renesas_sdhi: remove always false condition renesas_sdhi_probe() always sets host->dma as follows: host->dma = dma_priv; !host->dma is always false. Signed-off-by: Masahiro Yamada Reviewed-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/renesas_sdhi_sys_dmac.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c index 9ab10436e4b8..e210644f1116 100644 --- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c @@ -360,8 +360,8 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata) { /* We can only either use DMA for both Tx and Rx or not use it at all */ - if (!host->dma || (!host->pdev->dev.of_node && - (!pdata->chan_priv_tx || !pdata->chan_priv_rx))) + if (!host->pdev->dev.of_node && + (!pdata->chan_priv_tx || !pdata->chan_priv_rx)) return; if (!host->chan_tx && !host->chan_rx) { From 058db2868cd88b5474f26974253407fcbe932c22 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 25 Nov 2017 01:24:47 +0900 Subject: [PATCH 61/96] mmc: tmio, renesas_sdhi: move struct tmio_mmc_dma to renesas_sdhi.h struct tmio_mmc_dma looks like TMIO core data, but in fact, Renesas private data. Move it to renesas_sdhi.h (probably, it is better to rename it to renesas_sdhi_dma, or squash it into struct renesas_sdhi). I also moved struct renesas_sdhi and host_to_priv() to that header because they are necessary to convert the tmio_mmc_host pointer into the renesas_sdhi pointer. 
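The conversion works because host->pdata points at the tmio_mmc_data embedded inside struct renesas_sdhi, so container_of() can recover the enclosing private structure without an extra back-pointer; roughly (matching the helper moved below):

	struct renesas_sdhi {
		struct clk *clk;
		struct tmio_mmc_data mmc_data;	/* what host->pdata points to */
		struct tmio_mmc_dma dma_priv;
		/* ... */
	};

	#define host_to_priv(host) \
		container_of((host)->pdata, struct renesas_sdhi, mmc_data)
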
Signed-off-by: Masahiro Yamada Reviewed-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/renesas_sdhi.h | 19 +++++++++++++++++++ drivers/mmc/host/renesas_sdhi_core.c | 14 -------------- drivers/mmc/host/renesas_sdhi_internal_dmac.c | 6 ++++-- drivers/mmc/host/renesas_sdhi_sys_dmac.c | 16 ++++++++++------ drivers/mmc/host/tmio_mmc.h | 7 ------- 5 files changed, 33 insertions(+), 29 deletions(-) diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h index b9dfea5d8193..9a507b3a9838 100644 --- a/drivers/mmc/host/renesas_sdhi.h +++ b/drivers/mmc/host/renesas_sdhi.h @@ -35,6 +35,25 @@ struct renesas_sdhi_of_data { unsigned short max_segs; }; +struct tmio_mmc_dma { + enum dma_slave_buswidth dma_buswidth; + bool (*filter)(struct dma_chan *chan, void *arg); + void (*enable)(struct tmio_mmc_host *host, bool enable); +}; + +struct renesas_sdhi { + struct clk *clk; + struct clk *clk_cd; + struct tmio_mmc_data mmc_data; + struct tmio_mmc_dma dma_priv; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_default, *pins_uhs; + void __iomem *scc_ctl; +}; + +#define host_to_priv(host) \ + container_of((host)->pdata, struct renesas_sdhi, mmc_data) + int renesas_sdhi_probe(struct platform_device *pdev, const struct tmio_mmc_dma_ops *dma_ops); int renesas_sdhi_remove(struct platform_device *pdev); diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index ae8099690b1a..0eb62353630f 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -47,19 +47,6 @@ #define SDHI_VER_GEN3_SD 0xcc10 #define SDHI_VER_GEN3_SDMMC 0xcd10 -#define host_to_priv(host) \ - container_of((host)->pdata, struct renesas_sdhi, mmc_data) - -struct renesas_sdhi { - struct clk *clk; - struct clk *clk_cd; - struct tmio_mmc_data mmc_data; - struct tmio_mmc_dma dma_priv; - struct pinctrl *pinctrl; - struct pinctrl_state *pins_default, *pins_uhs; - void __iomem *scc_ctl; -}; - static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width) { u32 val; @@ -540,7 +527,6 @@ int renesas_sdhi_probe(struct platform_device *pdev, host->bus_shift = of_data->bus_shift; } - host->dma = dma_priv; host->write16_hook = renesas_sdhi_write16_hook; host->clk_enable = renesas_sdhi_clk_enable; host->clk_update = renesas_sdhi_clk_update; diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 396ae8a1c250..9498decf3165 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -103,6 +103,8 @@ renesas_sdhi_internal_dmac_dm_write(struct tmio_mmc_host *host, static void renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable) { + struct renesas_sdhi *priv = host_to_priv(host); + if (!host->chan_tx || !host->chan_rx) return; @@ -110,8 +112,8 @@ renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable) renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1, INFO1_CLEAR); - if (host->dma->enable) - host->dma->enable(host, enable); + if (priv->dma_priv.enable) + priv->dma_priv.enable(host, enable); } static void diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c index e210644f1116..aeb3838c05e3 100644 --- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c @@ -117,11 +117,13 @@ MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match); static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host, 
bool enable) { + struct renesas_sdhi *priv = host_to_priv(host); + if (!host->chan_tx || !host->chan_rx) return; - if (host->dma->enable) - host->dma->enable(host, enable); + if (priv->dma_priv.enable) + priv->dma_priv.enable(host, enable); } static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host) @@ -359,6 +361,8 @@ static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv) static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata) { + struct renesas_sdhi *priv = host_to_priv(host); + /* We can only either use DMA for both Tx and Rx or not use it at all */ if (!host->pdev->dev.of_node && (!pdata->chan_priv_tx || !pdata->chan_priv_rx)) @@ -378,7 +382,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host, dma_cap_set(DMA_SLAVE, mask); host->chan_tx = dma_request_slave_channel_compat(mask, - host->dma->filter, pdata->chan_priv_tx, + priv->dma_priv.filter, pdata->chan_priv_tx, &host->pdev->dev, "tx"); dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, host->chan_tx); @@ -389,7 +393,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host, cfg.direction = DMA_MEM_TO_DEV; cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift); - cfg.dst_addr_width = host->dma->dma_buswidth; + cfg.dst_addr_width = priv->dma_priv.dma_buswidth; if (!cfg.dst_addr_width) cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; cfg.src_addr = 0; @@ -398,7 +402,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host, goto ecfgtx; host->chan_rx = dma_request_slave_channel_compat(mask, - host->dma->filter, pdata->chan_priv_rx, + priv->dma_priv.filter, pdata->chan_priv_rx, &host->pdev->dev, "rx"); dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, host->chan_rx); @@ -408,7 +412,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host, cfg.direction = DMA_DEV_TO_MEM; cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset; - cfg.src_addr_width = host->dma->dma_buswidth; + cfg.src_addr_width = priv->dma_priv.dma_buswidth; if (!cfg.src_addr_width) cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; cfg.dst_addr = 0; diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index dd40b9631b3a..ed375a9056de 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -112,12 +112,6 @@ struct tmio_mmc_data; struct tmio_mmc_host; -struct tmio_mmc_dma { - enum dma_slave_buswidth dma_buswidth; - bool (*filter)(struct dma_chan *chan, void *arg); - void (*enable)(struct tmio_mmc_host *host, bool enable); -}; - struct tmio_mmc_dma_ops { void (*start)(struct tmio_mmc_host *host, struct mmc_data *data); void (*enable)(struct tmio_mmc_host *host, bool enable); @@ -149,7 +143,6 @@ struct tmio_mmc_host { struct platform_device *pdev; struct tmio_mmc_data *pdata; - struct tmio_mmc_dma *dma; /* DMA support */ bool force_pio; From 90d9510645765401c56d75f6003d6cb6c1f7ca2a Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 25 Nov 2017 01:24:48 +0900 Subject: [PATCH 62/96] mmc: tmio, renesas_sdhi: move Renesas-specific DMA data to renesas_sdhi.h struct tmio_mmc_host has "dma_dataend" and "dma_complete", but in fact, they are Renesas private data. 
Move them to renesas_sdhi.h Signed-off-by: Masahiro Yamada Reviewed-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/renesas_sdhi.h | 2 ++ drivers/mmc/host/renesas_sdhi_internal_dmac.c | 8 ++++++-- drivers/mmc/host/renesas_sdhi_sys_dmac.c | 15 ++++++++++----- drivers/mmc/host/tmio_mmc.h | 2 -- 4 files changed, 18 insertions(+), 9 deletions(-) diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h index 9a507b3a9838..3250dbed402f 100644 --- a/drivers/mmc/host/renesas_sdhi.h +++ b/drivers/mmc/host/renesas_sdhi.h @@ -39,6 +39,8 @@ struct tmio_mmc_dma { enum dma_slave_buswidth dma_buswidth; bool (*filter)(struct dma_chan *chan, void *arg); void (*enable)(struct tmio_mmc_host *host, bool enable); + struct completion dma_dataend; + struct tasklet_struct dma_complete; }; struct renesas_sdhi { diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 9498decf3165..7c03cfead6f9 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -132,7 +132,9 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) { static void renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host) { - tasklet_schedule(&host->dma_complete); + struct renesas_sdhi *priv = host_to_priv(host); + + tasklet_schedule(&priv->dma_priv.dma_complete); } static void @@ -222,10 +224,12 @@ static void renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata) { + struct renesas_sdhi *priv = host_to_priv(host); + /* Each value is set to non-zero to assume "enabling" each DMA */ host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; - tasklet_init(&host->dma_complete, + tasklet_init(&priv->dma_priv.dma_complete, renesas_sdhi_internal_dmac_complete_tasklet_fn, (unsigned long)host); tasklet_init(&host->dma_issue, diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c index aeb3838c05e3..c8a74b2dee00 100644 --- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c @@ -140,12 +140,15 @@ static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host) static void renesas_sdhi_sys_dmac_dataend_dma(struct tmio_mmc_host *host) { - complete(&host->dma_dataend); + struct renesas_sdhi *priv = host_to_priv(host); + + complete(&priv->dma_priv.dma_dataend); } static void renesas_sdhi_sys_dmac_dma_callback(void *arg) { struct tmio_mmc_host *host = arg; + struct renesas_sdhi *priv = host_to_priv(host); spin_lock_irq(&host->lock); @@ -163,7 +166,7 @@ static void renesas_sdhi_sys_dmac_dma_callback(void *arg) spin_unlock_irq(&host->lock); - wait_for_completion(&host->dma_dataend); + wait_for_completion(&priv->dma_priv.dma_dataend); spin_lock_irq(&host->lock); tmio_mmc_do_data_irq(host); @@ -173,6 +176,7 @@ out: static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host) { + struct renesas_sdhi *priv = host_to_priv(host); struct scatterlist *sg = host->sg_ptr, *sg_tmp; struct dma_async_tx_descriptor *desc = NULL; struct dma_chan *chan = host->chan_rx; @@ -216,7 +220,7 @@ static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host) DMA_CTRL_ACK); if (desc) { - reinit_completion(&host->dma_dataend); + reinit_completion(&priv->dma_priv.dma_dataend); desc->callback = renesas_sdhi_sys_dmac_dma_callback; desc->callback_param = host; @@ -247,6 +251,7 @@ pio: static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host) { + struct 
renesas_sdhi *priv = host_to_priv(host); struct scatterlist *sg = host->sg_ptr, *sg_tmp; struct dma_async_tx_descriptor *desc = NULL; struct dma_chan *chan = host->chan_tx; @@ -295,7 +300,7 @@ static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host) DMA_CTRL_ACK); if (desc) { - reinit_completion(&host->dma_dataend); + reinit_completion(&priv->dma_priv.dma_dataend); desc->callback = renesas_sdhi_sys_dmac_dma_callback; desc->callback_param = host; @@ -424,7 +429,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host, if (!host->bounce_buf) goto ebouncebuf; - init_completion(&host->dma_dataend); + init_completion(&priv->dma_priv.dma_dataend); tasklet_init(&host->dma_issue, renesas_sdhi_sys_dmac_issue_tasklet_fn, (unsigned long)host); diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index ed375a9056de..5972438105a3 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -148,8 +148,6 @@ struct tmio_mmc_host { bool force_pio; struct dma_chan *chan_rx; struct dma_chan *chan_tx; - struct completion dma_dataend; - struct tasklet_struct dma_complete; struct tasklet_struct dma_issue; struct scatterlist bounce_sg; u8 *bounce_buf; From 852d258f8465aa65adcce99f28552dd9b66a14a7 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 25 Nov 2017 01:24:49 +0900 Subject: [PATCH 63/96] mmc: tmio,renesas_sdhi: move ssc_tappos to renesas_sdhi.h struct tmio_mmc_host has "scc_tappos", but in fact, it is Renesas private data. Move it to renesas_sdhi.h Signed-off-by: Masahiro Yamada Reviewed-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/renesas_sdhi.h | 1 + drivers/mmc/host/renesas_sdhi_core.c | 4 ++-- drivers/mmc/host/tmio_mmc.h | 1 - 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h index 3250dbed402f..f13f798d8506 100644 --- a/drivers/mmc/host/renesas_sdhi.h +++ b/drivers/mmc/host/renesas_sdhi.h @@ -51,6 +51,7 @@ struct renesas_sdhi { struct pinctrl *pinctrl; struct pinctrl_state *pins_default, *pins_uhs; void __iomem *scc_ctl; + u32 scc_tappos; }; #define host_to_priv(host) \ diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 0eb62353630f..6a2988bd51a2 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -268,7 +268,7 @@ static unsigned int renesas_sdhi_init_tuning(struct tmio_mmc_host *host) ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN & sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL)); - sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, host->scc_tappos); + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, priv->scc_tappos); /* Read TAPNUM */ return (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL) >> @@ -591,7 +591,7 @@ int renesas_sdhi_probe(struct platform_device *pdev, for (i = 0; i < of_data->taps_num; i++) { if (taps[i].clk_rate == 0 || taps[i].clk_rate == host->mmc->f_max) { - host->scc_tappos = taps->tap; + priv->scc_tappos = taps->tap; hit = true; break; } diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 5972438105a3..a099fde27026 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -166,7 +166,6 @@ struct tmio_mmc_host { struct mutex ios_lock; /* protect set_ios() context */ bool native_hotplug; bool sdio_irq_enabled; - u32 scc_tappos; /* Mandatory callback */ int (*clk_enable)(struct tmio_mmc_host *host); From c4ba0e4abda39fb1ca81683be068b4556b2680d4 Mon Sep 17 00:00:00 
2001 From: Masahiro Yamada Date: Sat, 25 Nov 2017 01:24:50 +0900 Subject: [PATCH 64/96] mmc: tmio: change bus_shift to unsigned int Sane values for bus_shift are: 0 - for 16 bit bus 1 - for 32 bit bus 2 - for 64 bit bus "unsigned long" is too much. Signed-off-by: Masahiro Yamada Reviewed-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/tmio_mmc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index a099fde27026..15537c85c51a 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -139,7 +139,7 @@ struct tmio_mmc_host { struct scatterlist *sg_orig; unsigned int sg_len; unsigned int sg_off; - unsigned long bus_shift; + unsigned int bus_shift; struct platform_device *pdev; struct tmio_mmc_data *pdata; From 8d876bf472dba73c015cea9feea80dcb80626a7c Mon Sep 17 00:00:00 2001 From: Zhoujie Wu Date: Mon, 18 Dec 2017 14:38:47 -0800 Subject: [PATCH 65/96] mmc: sdhci-xenon: wait 5ms after set 1.8V signal enable According to SD spec 3.00 3.6.1 signal voltage switch procedure step 6~8, (6) Set 1.8V Signal Enable in the Host Control 2 register. (7) Wait 5ms. 1.8V voltage regulator shall be stable within this period. (8) If 1.8V Signal Enable is cleared by Host Controller, go to step (12). Host should wait 5ms after set 1.8V signal enable bit in Host Control 2 register and check if 1.8V is stable or not. But current code checks this bit right after set it. On some platforms with xenon controller found the bit is cleared right away and host reports "1.8V regulator output did not became stable" and 5ms delay can help. Implement voltage_switch callback for xenon controller to add 5ms delay to make sure the 1.8V signal enable bit is set by controller. Signed-off-by: Zhoujie Wu Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-xenon.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index 0842bbc2d7ad..4d0791f6ec23 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -230,7 +230,14 @@ static void xenon_set_power(struct sdhci_host *host, unsigned char mode, mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); } +static void xenon_voltage_switch(struct sdhci_host *host) +{ + /* Wait for 5ms after set 1.8V signal enable bit */ + usleep_range(5000, 5500); +} + static const struct sdhci_ops sdhci_xenon_ops = { + .voltage_switch = xenon_voltage_switch, .set_clock = sdhci_set_clock, .set_power = xenon_set_power, .set_bus_width = sdhci_set_bus_width, From d63da8c64bbf800a12fe0a4a2804e5953b8cf35e Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Tue, 19 Dec 2017 14:34:03 +0100 Subject: [PATCH 66/96] mmc: tmio: use io* accessors consistently Because we started using io*_rep accessors previously because they are more widely defined across architectures, let's be consistent and use this family for all accessor wrappers. 
Signed-off-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/tmio_mmc.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 15537c85c51a..52198f2929a5 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -228,7 +228,7 @@ int tmio_mmc_host_runtime_resume(struct device *dev); static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) { - return readw(host->ctl + (addr << host->bus_shift)); + return ioread16(host->ctl + (addr << host->bus_shift)); } static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, @@ -240,8 +240,8 @@ static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host, int addr) { - return readw(host->ctl + (addr << host->bus_shift)) | - readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; + return ioread16(host->ctl + (addr << host->bus_shift)) | + ioread16(host->ctl + ((addr + 2) << host->bus_shift)) << 16; } static inline void sd_ctrl_read32_rep(struct tmio_mmc_host *host, int addr, @@ -258,7 +258,7 @@ static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, */ if (host->write16_hook && host->write16_hook(host, addr)) return; - writew(val, host->ctl + (addr << host->bus_shift)); + iowrite16(val, host->ctl + (addr << host->bus_shift)); } static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, @@ -270,8 +270,8 @@ static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, int addr, u32 val) { - writew(val & 0xffff, host->ctl + (addr << host->bus_shift)); - writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); + iowrite16(val & 0xffff, host->ctl + (addr << host->bus_shift)); + iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); } static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr, From d72d72cd33ad59134de203b2fc4e2e4cc81e72c5 Mon Sep 17 00:00:00 2001 From: Atul Garg Date: Wed, 3 Jan 2018 20:17:36 -0800 Subject: [PATCH 67/96] mmc:host:sdhci-pci:Addition of Arasan PCI Controller with integrated phy. The Arasan Controller is based on a FPGA platform and has integrated phy with specific registers used during initialization and management of different modes. The phy and the controller are integrated and registers are very specific to Arasan. Arasan being an IP provider, licenses these IPs to various companies for integration of IP in custom SOCs. The custom SOCs define own register map depending on how bits are tied inside the SOC for phy registers, depending on SOC memory plan and hence will require own platform drivers. If more details on phy registers are required, an interface document is hosted at https://arasan.com/NF/eMMC5.1 PHY Programming in Linux.pdf. 
Signed-off-by: Atul Garg Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/Makefile | 2 +- drivers/mmc/host/sdhci-pci-arasan.c | 331 ++++++++++++++++++++++++++++ drivers/mmc/host/sdhci-pci-core.c | 4 +- drivers/mmc/host/sdhci-pci.h | 7 +- 4 files changed, 340 insertions(+), 4 deletions(-) create mode 100644 drivers/mmc/host/sdhci-pci-arasan.c diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 191a04010205..84cd1388abc3 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -11,7 +11,7 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o obj-$(CONFIG_MMC_MXS) += mxs-mmc.o obj-$(CONFIG_MMC_SDHCI) += sdhci.o obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o -sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o +sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o sdhci-pci-arasan.o obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o diff --git a/drivers/mmc/host/sdhci-pci-arasan.c b/drivers/mmc/host/sdhci-pci-arasan.c new file mode 100644 index 000000000000..499f3205ec5c --- /dev/null +++ b/drivers/mmc/host/sdhci-pci-arasan.c @@ -0,0 +1,331 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sdhci-pci-arasan.c - Driver for Arasan PCI Controller with + * integrated phy. + * + * Copyright (C) 2017 Arasan Chip Systems Inc. + * + * Author: Atul Garg + */ + +#include +#include + +#include "sdhci.h" +#include "sdhci-pci.h" + +/* Extra registers for Arasan SD/SDIO/MMC Host Controller with PHY */ +#define PHY_ADDR_REG 0x300 +#define PHY_DAT_REG 0x304 + +#define PHY_WRITE BIT(8) +#define PHY_BUSY BIT(9) +#define DATA_MASK 0xFF + +/* PHY Specific Registers */ +#define DLL_STATUS 0x00 +#define IPAD_CTRL1 0x01 +#define IPAD_CTRL2 0x02 +#define IPAD_STS 0x03 +#define IOREN_CTRL1 0x06 +#define IOREN_CTRL2 0x07 +#define IOPU_CTRL1 0x08 +#define IOPU_CTRL2 0x09 +#define ITAP_DELAY 0x0C +#define OTAP_DELAY 0x0D +#define STRB_SEL 0x0E +#define CLKBUF_SEL 0x0F +#define MODE_CTRL 0x11 +#define DLL_TRIM 0x12 +#define CMD_CTRL 0x20 +#define DATA_CTRL 0x21 +#define STRB_CTRL 0x22 +#define CLK_CTRL 0x23 +#define PHY_CTRL 0x24 + +#define DLL_ENBL BIT(3) +#define RTRIM_EN BIT(1) +#define PDB_ENBL BIT(1) +#define RETB_ENBL BIT(6) +#define ODEN_CMD BIT(1) +#define ODEN_DAT 0xFF +#define REN_STRB BIT(0) +#define REN_CMND BIT(1) +#define REN_DATA 0xFF +#define PU_CMD BIT(1) +#define PU_DAT 0xFF +#define ITAPDLY_EN BIT(0) +#define OTAPDLY_EN BIT(0) +#define OD_REL_CMD BIT(1) +#define OD_REL_DAT 0xFF +#define DLLTRM_ICP 0x8 +#define PDB_CMND BIT(0) +#define PDB_DATA 0xFF +#define PDB_STRB BIT(0) +#define PDB_CLOCK BIT(0) +#define CALDONE_MASK 0x10 +#define DLL_RDY_MASK 0x10 +#define MAX_CLK_BUF 0x7 + +/* Mode Controls */ +#define ENHSTRB_MODE BIT(0) +#define HS400_MODE BIT(1) +#define LEGACY_MODE BIT(2) +#define DDR50_MODE BIT(3) + +/* + * Controller has no specific bits for HS200/HS. + * Used BIT(4), BIT(5) for software programming. 
+ */ +#define HS200_MODE BIT(4) +#define HISPD_MODE BIT(5) + +#define OTAPDLY(x) (((x) << 1) | OTAPDLY_EN) +#define ITAPDLY(x) (((x) << 1) | ITAPDLY_EN) +#define FREQSEL(x) (((x) << 5) | DLL_ENBL) +#define IOPAD(x, y) ((x) | ((y) << 2)) + +/* Arasan private data */ +struct arasan_host { + u32 chg_clk; +}; + +static int arasan_phy_addr_poll(struct sdhci_host *host, u32 offset, u32 mask) +{ + ktime_t timeout = ktime_add_us(ktime_get(), 100); + bool failed; + u8 val = 0; + + while (1) { + failed = ktime_after(ktime_get(), timeout); + val = sdhci_readw(host, PHY_ADDR_REG); + if (!(val & mask)) + return 0; + if (failed) + return -EBUSY; + } +} + +static int arasan_phy_write(struct sdhci_host *host, u8 data, u8 offset) +{ + sdhci_writew(host, data, PHY_DAT_REG); + sdhci_writew(host, (PHY_WRITE | offset), PHY_ADDR_REG); + return arasan_phy_addr_poll(host, PHY_ADDR_REG, PHY_BUSY); +} + +static int arasan_phy_read(struct sdhci_host *host, u8 offset, u8 *data) +{ + int ret; + + sdhci_writew(host, 0, PHY_DAT_REG); + sdhci_writew(host, offset, PHY_ADDR_REG); + ret = arasan_phy_addr_poll(host, PHY_ADDR_REG, PHY_BUSY); + + /* Masking valid data bits */ + *data = sdhci_readw(host, PHY_DAT_REG) & DATA_MASK; + return ret; +} + +static int arasan_phy_sts_poll(struct sdhci_host *host, u32 offset, u32 mask) +{ + int ret; + ktime_t timeout = ktime_add_us(ktime_get(), 100); + bool failed; + u8 val = 0; + + while (1) { + failed = ktime_after(ktime_get(), timeout); + ret = arasan_phy_read(host, offset, &val); + if (ret) + return -EBUSY; + else if (val & mask) + return 0; + if (failed) + return -EBUSY; + } +} + +/* Initialize the Arasan PHY */ +static int arasan_phy_init(struct sdhci_host *host) +{ + int ret; + u8 val; + + /* Program IOPADs and wait for calibration to be done */ + if (arasan_phy_read(host, IPAD_CTRL1, &val) || + arasan_phy_write(host, val | RETB_ENBL | PDB_ENBL, IPAD_CTRL1) || + arasan_phy_read(host, IPAD_CTRL2, &val) || + arasan_phy_write(host, val | RTRIM_EN, IPAD_CTRL2)) + return -EBUSY; + ret = arasan_phy_sts_poll(host, IPAD_STS, CALDONE_MASK); + if (ret) + return -EBUSY; + + /* Program CMD/Data lines */ + if (arasan_phy_read(host, IOREN_CTRL1, &val) || + arasan_phy_write(host, val | REN_CMND | REN_STRB, IOREN_CTRL1) || + arasan_phy_read(host, IOPU_CTRL1, &val) || + arasan_phy_write(host, val | PU_CMD, IOPU_CTRL1) || + arasan_phy_read(host, CMD_CTRL, &val) || + arasan_phy_write(host, val | PDB_CMND, CMD_CTRL) || + arasan_phy_read(host, IOREN_CTRL2, &val) || + arasan_phy_write(host, val | REN_DATA, IOREN_CTRL2) || + arasan_phy_read(host, IOPU_CTRL2, &val) || + arasan_phy_write(host, val | PU_DAT, IOPU_CTRL2) || + arasan_phy_read(host, DATA_CTRL, &val) || + arasan_phy_write(host, val | PDB_DATA, DATA_CTRL) || + arasan_phy_read(host, STRB_CTRL, &val) || + arasan_phy_write(host, val | PDB_STRB, STRB_CTRL) || + arasan_phy_read(host, CLK_CTRL, &val) || + arasan_phy_write(host, val | PDB_CLOCK, CLK_CTRL) || + arasan_phy_read(host, CLKBUF_SEL, &val) || + arasan_phy_write(host, val | MAX_CLK_BUF, CLKBUF_SEL) || + arasan_phy_write(host, LEGACY_MODE, MODE_CTRL)) + return -EBUSY; + return 0; +} + +/* Set Arasan PHY for different modes */ +static int arasan_phy_set(struct sdhci_host *host, u8 mode, u8 otap, + u8 drv_type, u8 itap, u8 trim, u8 clk) +{ + u8 val; + int ret; + + if (mode == HISPD_MODE || mode == HS200_MODE) + ret = arasan_phy_write(host, 0x0, MODE_CTRL); + else + ret = arasan_phy_write(host, mode, MODE_CTRL); + if (ret) + return ret; + if (mode == HS400_MODE || mode == HS200_MODE) { + ret = 
arasan_phy_read(host, IPAD_CTRL1, &val); + if (ret) + return ret; + ret = arasan_phy_write(host, IOPAD(val, drv_type), IPAD_CTRL1); + if (ret) + return ret; + } + if (mode == LEGACY_MODE) { + ret = arasan_phy_write(host, 0x0, OTAP_DELAY); + if (ret) + return ret; + ret = arasan_phy_write(host, 0x0, ITAP_DELAY); + } else { + ret = arasan_phy_write(host, OTAPDLY(otap), OTAP_DELAY); + if (ret) + return ret; + if (mode != HS200_MODE) + ret = arasan_phy_write(host, ITAPDLY(itap), ITAP_DELAY); + else + ret = arasan_phy_write(host, 0x0, ITAP_DELAY); + } + if (ret) + return ret; + if (mode != LEGACY_MODE) { + ret = arasan_phy_write(host, trim, DLL_TRIM); + if (ret) + return ret; + } + ret = arasan_phy_write(host, 0, DLL_STATUS); + if (ret) + return ret; + if (mode != LEGACY_MODE) { + ret = arasan_phy_write(host, FREQSEL(clk), DLL_STATUS); + if (ret) + return ret; + ret = arasan_phy_sts_poll(host, DLL_STATUS, DLL_RDY_MASK); + if (ret) + return -EBUSY; + } + return 0; +} + +static int arasan_select_phy_clock(struct sdhci_host *host) +{ + struct sdhci_pci_slot *slot = sdhci_priv(host); + struct arasan_host *arasan_host = sdhci_pci_priv(slot); + u8 clk; + + if (arasan_host->chg_clk == host->mmc->ios.clock) + return 0; + + arasan_host->chg_clk = host->mmc->ios.clock; + if (host->mmc->ios.clock == 200000000) + clk = 0x0; + else if (host->mmc->ios.clock == 100000000) + clk = 0x2; + else if (host->mmc->ios.clock == 50000000) + clk = 0x1; + else + clk = 0x0; + + if (host->mmc_host_ops.hs400_enhanced_strobe) { + arasan_phy_set(host, ENHSTRB_MODE, 1, 0x0, 0x0, + DLLTRM_ICP, clk); + } else { + switch (host->mmc->ios.timing) { + case MMC_TIMING_LEGACY: + arasan_phy_set(host, LEGACY_MODE, 0x0, 0x0, 0x0, + 0x0, 0x0); + break; + case MMC_TIMING_MMC_HS: + case MMC_TIMING_SD_HS: + arasan_phy_set(host, HISPD_MODE, 0x3, 0x0, 0x2, + DLLTRM_ICP, clk); + break; + case MMC_TIMING_MMC_HS200: + case MMC_TIMING_UHS_SDR104: + arasan_phy_set(host, HS200_MODE, 0x2, + host->mmc->ios.drv_type, 0x0, + DLLTRM_ICP, clk); + break; + case MMC_TIMING_MMC_DDR52: + case MMC_TIMING_UHS_DDR50: + arasan_phy_set(host, DDR50_MODE, 0x1, 0x0, + 0x0, DLLTRM_ICP, clk); + break; + case MMC_TIMING_MMC_HS400: + arasan_phy_set(host, HS400_MODE, 0x1, + host->mmc->ios.drv_type, 0xa, + DLLTRM_ICP, clk); + break; + default: + break; + } + } + return 0; +} + +static int arasan_pci_probe_slot(struct sdhci_pci_slot *slot) +{ + int err; + + slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE | MMC_CAP_8_BIT_DATA; + err = arasan_phy_init(slot->host); + if (err) + return -ENODEV; + return 0; +} + +static void arasan_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) +{ + sdhci_set_clock(host, clock); + + /* Change phy settings for the new clock */ + arasan_select_phy_clock(host); +} + +static const struct sdhci_ops arasan_sdhci_pci_ops = { + .set_clock = arasan_sdhci_set_clock, + .enable_dma = sdhci_pci_enable_dma, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +const struct sdhci_pci_fixes sdhci_arasan = { + .probe_slot = arasan_pci_probe_slot, + .ops = &arasan_sdhci_pci_ops, + .priv_size = sizeof(struct arasan_host), +}; diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 110c634cfb43..c5b229b46314 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -35,7 +35,6 @@ #include "sdhci.h" #include "sdhci-pci.h" -static int sdhci_pci_enable_dma(struct sdhci_host *host); static void sdhci_pci_hw_reset(struct 
sdhci_host *host); #ifdef CONFIG_PM_SLEEP @@ -1459,6 +1458,7 @@ static const struct pci_device_id pci_ids[] = { SDHCI_PCI_DEVICE(O2, SDS1, o2), SDHCI_PCI_DEVICE(O2, SEABIRD0, o2), SDHCI_PCI_DEVICE(O2, SEABIRD1, o2), + SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan), SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd), /* Generic SD host controller */ {PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)}, @@ -1473,7 +1473,7 @@ MODULE_DEVICE_TABLE(pci, pci_ids); * * \*****************************************************************************/ -static int sdhci_pci_enable_dma(struct sdhci_host *host) +int sdhci_pci_enable_dma(struct sdhci_host *host) { struct sdhci_pci_slot *slot; struct pci_dev *pdev; diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index 0056f08a29cc..5cbcdc448f98 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -55,6 +55,9 @@ #define PCI_SUBDEVICE_ID_NI_7884 0x7884 +#define PCI_VENDOR_ID_ARASAN 0x16e6 +#define PCI_DEVICE_ID_ARASAN_PHY_EMMC 0x0670 + /* * PCI device class and mask */ @@ -170,11 +173,13 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot) #ifdef CONFIG_PM_SLEEP int sdhci_pci_resume_host(struct sdhci_pci_chip *chip); #endif - +int sdhci_pci_enable_dma(struct sdhci_host *host); int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot); int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip); #ifdef CONFIG_PM_SLEEP int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip); #endif +extern const struct sdhci_pci_fixes sdhci_arasan; + #endif /* __SDHCI_PCI_H */ From 371d39fabcebc3cb042a0a9c4bc2f494d6cde02c Mon Sep 17 00:00:00 2001 From: Michael Trimarchi Date: Thu, 4 Jan 2018 16:30:57 +0100 Subject: [PATCH 68/96] mmc: sdhci-esdhc-imx: Manage sdhci_runtime_suspend_host error code We need to return in case of error even if the actual implementation of sdhci_runtime_suspend_host always return 0. 
We don't want to power down the clock, and the assumption is that sdhci_runtime_suspend_host() always leaves the system in a consistent state in case of failure. Signed-off-by: Michael Trimarchi Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-esdhc-imx.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 85140c9af581..d08c21e511dd 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -1389,6 +1389,8 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev) int ret; ret = sdhci_runtime_suspend_host(host); + if (ret) + return ret; if (host->tuning_mode != SDHCI_TUNING_MODE_3) mmc_retune_needed(host->mmc); From a0ad3087586d0945a3cdfa755f951995dfc1b17e Mon Sep 17 00:00:00 2001 From: Michael Trimarchi Date: Thu, 4 Jan 2018 16:30:58 +0100 Subject: [PATCH 69/96] mmc: sdhci-esdhc-imx: Changes the order of how clocks are being re-enabled runtime_resume() should re-enable the clocks in reverse order compared with runtime_suspend(). Signed-off-by: Michael Trimarchi Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-esdhc-imx.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index d08c21e511dd..6d4e3233a920 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -1411,31 +1411,33 @@ static int sdhci_esdhc_runtime_resume(struct device *dev) struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); int err; + err = clk_prepare_enable(imx_data->clk_ahb); + if (err) + return err; + if (!sdhci_sdio_irq_enabled(host)) { err = clk_prepare_enable(imx_data->clk_per); if (err) - return err; + goto disable_ahb_clk; err = clk_prepare_enable(imx_data->clk_ipg); if (err) goto disable_per_clk; } - err = clk_prepare_enable(imx_data->clk_ahb); - if (err) - goto disable_ipg_clk; + err = sdhci_runtime_resume_host(host); if (err) - goto disable_ahb_clk; + goto disable_ipg_clk; return 0; -disable_ahb_clk: - clk_disable_unprepare(imx_data->clk_ahb); disable_ipg_clk: if (!sdhci_sdio_irq_enabled(host)) clk_disable_unprepare(imx_data->clk_ipg); disable_per_clk: if (!sdhci_sdio_irq_enabled(host)) clk_disable_unprepare(imx_data->clk_per); +disable_ahb_clk: + clk_disable_unprepare(imx_data->clk_ahb); return err; } #endif From 3602785b341a9545eee97f6ce634091da1a2b56d Mon Sep 17 00:00:00 2001 From: Michael Trimarchi Date: Thu, 4 Jan 2018 16:30:59 +0100 Subject: [PATCH 70/96] mmc: sdhci-esdhc-imx: Enable/Disable mmc clock during runtime suspend The mmc clock can be stopped during runtime suspend and restarted during runtime resume if the sdio irq is not enabled. Stopping the sdio clock reduces EMI of the device when the bus is not in use.
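The approach, in sketch form (mirroring the hunks below; esdhc_pltfm_set_clock() is the driver's existing clock callback):

	/* runtime suspend: remember the current card clock rate, then gate it */
	imx_data->actual_clock = host->mmc->actual_clock;
	esdhc_pltfm_set_clock(host, 0);

	/* runtime resume: after the clocks are re-enabled, restore the saved rate */
	esdhc_pltfm_set_clock(host, imx_data->actual_clock);

Both halves sit inside the existing !sdhci_sdio_irq_enabled(host) guards, so the clock keeps running whenever the SDIO IRQ is in use.
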
Signed-off-by: Michael Trimarchi Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-esdhc-imx.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 6d4e3233a920..53cc1b6caf8f 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -193,6 +193,7 @@ struct pltfm_imx_data { struct clk *clk_ipg; struct clk *clk_ahb; struct clk *clk_per; + unsigned int actual_clock; enum { NO_CMD_PENDING, /* no multiblock command pending */ MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */ @@ -1396,6 +1397,8 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev) mmc_retune_needed(host->mmc); if (!sdhci_sdio_irq_enabled(host)) { + imx_data->actual_clock = host->mmc->actual_clock; + esdhc_pltfm_set_clock(host, 0); clk_disable_unprepare(imx_data->clk_per); clk_disable_unprepare(imx_data->clk_ipg); } @@ -1422,6 +1425,7 @@ static int sdhci_esdhc_runtime_resume(struct device *dev) err = clk_prepare_enable(imx_data->clk_ipg); if (err) goto disable_per_clk; + esdhc_pltfm_set_clock(host, imx_data->actual_clock); } err = sdhci_runtime_resume_host(host); From 90e1d8ccdbdb2384948c5f5067b0f28848ca339f Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 8 Jan 2018 15:44:19 +0000 Subject: [PATCH 71/96] mmc: sdhci_f_sdh30: add ACPI support The Fujitsu SDH30 SDHCI controller may be described as a SCX0002 ACPI device on ACPI platforms incorporating the Socionext SynQuacer SoC. Given that mmc_of_parse() has already been made ACPI/DT agnostic, making the SDH30 driver ACPI capable is actually rather simple: all we need to do is make the call to sdhci_get_of_property() [which does not set any properties we care about] and the clock handling dependent on whether we are dealing with a DT device, and exposing the ACPI id via the platform_driver struct and the module metadata. Signed-off-by: Ard Biesheuvel Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/Kconfig | 2 +- drivers/mmc/host/sdhci_f_sdh30.c | 56 ++++++++++++++++++++------------ 2 files changed, 37 insertions(+), 21 deletions(-) diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index cc4fd07735a7..945ba50e6e6e 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -321,7 +321,7 @@ config MMC_SDHCI_BCM_KONA config MMC_SDHCI_F_SDH30 tristate "SDHCI support for Fujitsu Semiconductor F_SDH30" depends on MMC_SDHCI_PLTFM - depends on OF + depends on OF || ACPI help This selects the Secure Digital Host Controller Interface (SDHCI) Needed by some Fujitsu SoC for MMC / SD / SDIO support. diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index 04ca0d33a521..485f7591fae4 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -10,9 +10,11 @@ * the Free Software Foundation, version 2 of the License. 
*/ +#include #include #include #include +#include #include #include @@ -146,7 +148,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) platform_set_drvdata(pdev, host); - sdhci_get_of_property(pdev); host->hw_name = "f_sdh30"; host->ops = &sdhci_f_sdh30_ops; host->irq = irq; @@ -158,26 +159,30 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) goto err; } - priv->clk_iface = devm_clk_get(&pdev->dev, "iface"); - if (IS_ERR(priv->clk_iface)) { - ret = PTR_ERR(priv->clk_iface); - goto err; + if (dev_of_node(dev)) { + sdhci_get_of_property(pdev); + + priv->clk_iface = devm_clk_get(&pdev->dev, "iface"); + if (IS_ERR(priv->clk_iface)) { + ret = PTR_ERR(priv->clk_iface); + goto err; + } + + ret = clk_prepare_enable(priv->clk_iface); + if (ret) + goto err; + + priv->clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(priv->clk)) { + ret = PTR_ERR(priv->clk); + goto err_clk; + } + + ret = clk_prepare_enable(priv->clk); + if (ret) + goto err_clk; } - ret = clk_prepare_enable(priv->clk_iface); - if (ret) - goto err; - - priv->clk = devm_clk_get(&pdev->dev, "core"); - if (IS_ERR(priv->clk)) { - ret = PTR_ERR(priv->clk); - goto err_clk; - } - - ret = clk_prepare_enable(priv->clk); - if (ret) - goto err_clk; - /* init vendor specific regs */ ctrl = sdhci_readw(host, F_SDH30_AHB_CONFIG); ctrl |= F_SDH30_SIN | F_SDH30_AHB_INCR_16 | F_SDH30_AHB_INCR_8 | @@ -226,16 +231,27 @@ static int sdhci_f_sdh30_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_OF static const struct of_device_id f_sdh30_dt_ids[] = { { .compatible = "fujitsu,mb86s70-sdhci-3.0" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, f_sdh30_dt_ids); +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id f_sdh30_acpi_ids[] = { + { "SCX0002" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(acpi, f_sdh30_acpi_ids); +#endif static struct platform_driver sdhci_f_sdh30_driver = { .driver = { .name = "f_sdh30", - .of_match_table = f_sdh30_dt_ids, + .of_match_table = of_match_ptr(f_sdh30_dt_ids), + .acpi_match_table = ACPI_PTR(f_sdh30_acpi_ids), .pm = &sdhci_pltfm_pmops, }, .probe = sdhci_f_sdh30_probe, From b8155d3ff3ebbdfa10c6ec6c5f04b263670727e6 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 9 Jan 2018 12:39:10 +0300 Subject: [PATCH 72/96] mmc: tmio, renesas_sdhi: Remove unneeded NULL check The inconsistent NULL checking in this function causes static checker warnings. drivers/mmc/host/renesas_sdhi_sys_dmac.c:360 renesas_sdhi_sys_dmac_issue_tasklet_fn() error: we previously assumed 'host' could be null (see line 351) On reviewing this code, "host" can't ever be NULL so we can just remove the check. 
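A minimal illustration of the pattern this class of checker warning targets (hypothetical code, not the driver itself): the pointer is used before the check, so the later NULL test is either dead code or masks a real bug.

    static void issue_tasklet(struct tmio_mmc_host *host)
    {
            spin_lock_irq(&host->lock);     /* already dereferences host */

            if (host && host->data) {       /* "host &&" can never matter here */
                    /* ... start the DMA transfer ... */
            }

            spin_unlock_irq(&host->lock);
    }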
Signed-off-by: Dan Carpenter Signed-off-by: Ulf Hansson --- drivers/mmc/host/renesas_sdhi_sys_dmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c index c8a74b2dee00..82d757c480b2 100644 --- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c @@ -348,7 +348,7 @@ static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv) spin_lock_irq(&host->lock); - if (host && host->data) { + if (host->data) { if (host->data->flags & MMC_DATA_READ) chan = host->chan_rx; else From 2a609abe71ca59e4bd7139e161eaca2144ae6f2e Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 11 Jan 2018 15:51:58 +0200 Subject: [PATCH 73/96] sdhci: Advertise 2.0v supply on SDIO host controller On Intel Edison the Broadcom Wi-Fi card, which is connected to SDIO, requires 2.0v, while the host, according to Intel Merrifield TRM, supports 1.8v supply only. The card announces itself as mmc2: new ultra high speed DDR50 SDIO card at address 0001 Introduce a custom OCR mask for SDIO host controller on Intel Merrifield and add a special case to sdhci_set_power_noreg() to override 2.0v supply by enforcing 1.8v power choice. Signed-off-by: Andy Shevchenko Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-pci-core.c | 2 ++ drivers/mmc/host/sdhci.c | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index c5b229b46314..00fa7a36b336 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -930,6 +930,8 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot) slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; break; case INTEL_MRFLD_SDIO: + /* Advertise 2.0v for compatibility with the SDIO card's OCR */ + slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195; slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD; break; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index e9290a3439d5..80b1a59bc3c5 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1434,6 +1434,13 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, if (mode != MMC_POWER_OFF) { switch (1 << vdd) { case MMC_VDD_165_195: + /* + * Without a regulator, SDHCI does not support 2.0v + * so we only get here if the driver deliberately + * added the 2.0v range to ocr_avail. Map it to 1.8v + * for the purpose of turning on the power. + */ + case MMC_VDD_20_21: pwr = SDHCI_POWER_180; break; case MMC_VDD_29_30: From de21dc1d9a2a9fc5023c1fe3a24ba21e68c34928 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 25 Nov 2017 01:24:44 +0900 Subject: [PATCH 74/96] mmc: tmio: use mmc_can_gpio_cd() instead of checking TMIO_MMC_USE_GPIO_CD To use a GPIO line for card detection, TMIO_MMC_USE_GPIO_CD is set by a legacy board (arch/sh/boards/mach-ecovec24). For DT platforms, the "cd-gpios" property is a legitimate way for that in case the IP-builtin card detection can not be used for some reason. mmc_of_parse() calls mmc_gpiod_request_cd() to set up ctx->cd_gpio if the "cd-gpios" property is specified. To cater to both cases, mmc_can_gpio_cd() is a correct way to check which card detection logic is used. 
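The resulting check, as a sketch (this mirrors the one-line change below; mmc_can_gpio_cd() reports whether a card-detect GPIO was set up, regardless of whether that happened via the legacy flag or the "cd-gpios" property):

    /* native hotplug only makes sense when neither GPIO CD nor polling is used */
    _host->native_hotplug = !(mmc_can_gpio_cd(mmc) ||
                              mmc->caps & MMC_CAP_NEEDS_POLL ||
                              !mmc_card_is_removable(mmc));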
Signed-off-by: Masahiro Yamada Reviewed-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/tmio_mmc_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 6b18c0509ff4..0929b987fb29 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1232,7 +1232,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, } mmc->max_seg_size = mmc->max_req_size; - _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD || + _host->native_hotplug = !(mmc_can_gpio_cd(mmc) || mmc->caps & MMC_CAP_NEEDS_POLL || !mmc_card_is_removable(mmc)); From 84362d79f436f12d1d9b9640a633de1b684a2609 Mon Sep 17 00:00:00 2001 From: Shawn Lin Date: Tue, 16 Jan 2018 11:43:51 +0800 Subject: [PATCH 75/96] mmc: sdhci-of-arasan: Add CQHCI support for arasan,sdhci-5.1 Add CQHCI initialization and implement CQHCI operations for Arasan SDHCI variant host, namely arasan,sdhci-5.1, which is used by Rockchip RK3399 platform. Signed-off-by: Shawn Lin Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/Kconfig | 1 + drivers/mmc/host/sdhci-of-arasan.c | 137 +++++++++++++++++++++++++++-- 2 files changed, 132 insertions(+), 6 deletions(-) diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 945ba50e6e6e..579fc7adf15b 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -133,6 +133,7 @@ config MMC_SDHCI_OF_ARASAN depends on MMC_SDHCI_PLTFM depends on OF depends on COMMON_CLK + select MMC_CQHCI help This selects the Arasan Secure Digital Host Controller Interface (SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC. diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index fb572066a88b..c33a5f7393bd 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -25,11 +25,13 @@ #include #include #include -#include "sdhci-pltfm.h" #include -#define SDHCI_ARASAN_VENDOR_REGISTER 0x78 +#include "cqhci.h" +#include "sdhci-pltfm.h" +#define SDHCI_ARASAN_VENDOR_REGISTER 0x78 +#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200 #define VENDOR_ENHANCED_STROBE BIT(0) #define PHY_CLK_TOO_SLOW_HZ 400000 @@ -90,6 +92,7 @@ struct sdhci_arasan_data { struct phy *phy; bool is_phy_on; + bool has_cqe; struct clk_hw sdcardclk_hw; struct clk *sdcardclk; @@ -290,6 +293,62 @@ static const struct sdhci_pltfm_data sdhci_arasan_pdata = { SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN, }; +static u32 sdhci_arasan_cqhci_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + +static void sdhci_arasan_dumpregs(struct mmc_host *mmc) +{ + sdhci_dumpregs(mmc_priv(mmc)); +} + +static void sdhci_arasan_cqe_enable(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + u32 reg; + + reg = sdhci_readl(host, SDHCI_PRESENT_STATE); + while (reg & SDHCI_DATA_AVAILABLE) { + sdhci_readl(host, SDHCI_BUFFER); + reg = sdhci_readl(host, SDHCI_PRESENT_STATE); + } + + sdhci_cqe_enable(mmc); +} + +static const struct cqhci_host_ops sdhci_arasan_cqhci_ops = { + .enable = sdhci_arasan_cqe_enable, + .disable = sdhci_cqe_disable, + .dumpregs = sdhci_arasan_dumpregs, +}; + +static const struct sdhci_ops sdhci_arasan_cqe_ops = { + .set_clock = sdhci_arasan_set_clock, + .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .get_timeout_clock = 
sdhci_pltfm_clk_get_max_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_arasan_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, + .set_power = sdhci_arasan_set_power, + .irq = sdhci_arasan_cqhci_irq, +}; + +static const struct sdhci_pltfm_data sdhci_arasan_cqe_pdata = { + .ops = &sdhci_arasan_cqe_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN, +}; + #ifdef CONFIG_PM_SLEEP /** * sdhci_arasan_suspend - Suspend method for the driver @@ -309,6 +368,12 @@ static int sdhci_arasan_suspend(struct device *dev) if (host->tuning_mode != SDHCI_TUNING_MODE_3) mmc_retune_needed(host->mmc); + if (sdhci_arasan->has_cqe) { + ret = cqhci_suspend(host->mmc); + if (ret) + return ret; + } + ret = sdhci_suspend_host(host); if (ret) return ret; @@ -365,7 +430,16 @@ static int sdhci_arasan_resume(struct device *dev) sdhci_arasan->is_phy_on = true; } - return sdhci_resume_host(host); + ret = sdhci_resume_host(host); + if (ret) { + dev_err(dev, "Cannot resume host.\n"); + return ret; + } + + if (sdhci_arasan->has_cqe) + return cqhci_resume(host->mmc); + + return 0; } #endif /* ! CONFIG_PM_SLEEP */ @@ -568,6 +642,49 @@ static void sdhci_arasan_unregister_sdclk(struct device *dev) of_clk_del_provider(dev->of_node); } +static int sdhci_arasan_add_host(struct sdhci_arasan_data *sdhci_arasan) +{ + struct sdhci_host *host = sdhci_arasan->host; + struct cqhci_host *cq_host; + bool dma64; + int ret; + + if (!sdhci_arasan->has_cqe) + return sdhci_add_host(host); + + ret = sdhci_setup_host(host); + if (ret) + return ret; + + cq_host = devm_kzalloc(host->mmc->parent, + sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) { + ret = -ENOMEM; + goto cleanup; + } + + cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR; + cq_host->ops = &sdhci_arasan_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + if (dma64) + cq_host->caps |= CQHCI_TASK_DESC_SZ_128; + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) + goto cleanup; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + return 0; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + static int sdhci_arasan_probe(struct platform_device *pdev) { int ret; @@ -578,9 +695,15 @@ static int sdhci_arasan_probe(struct platform_device *pdev) struct sdhci_pltfm_host *pltfm_host; struct sdhci_arasan_data *sdhci_arasan; struct device_node *np = pdev->dev.of_node; + const struct sdhci_pltfm_data *pdata; + + if (of_device_is_compatible(pdev->dev.of_node, "arasan,sdhci-5.1")) + pdata = &sdhci_arasan_cqe_pdata; + else + pdata = &sdhci_arasan_pdata; + + host = sdhci_pltfm_init(pdev, pdata, sizeof(*sdhci_arasan)); - host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata, - sizeof(*sdhci_arasan)); if (IS_ERR(host)) return PTR_ERR(host); @@ -675,9 +798,11 @@ static int sdhci_arasan_probe(struct platform_device *pdev) sdhci_arasan_hs400_enhanced_strobe; host->mmc_host_ops.start_signal_voltage_switch = sdhci_arasan_voltage_switch; + sdhci_arasan->has_cqe = true; + host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; } - ret = sdhci_add_host(host); + ret = sdhci_arasan_add_host(sdhci_arasan); if (ret) goto err_add_host; From 5c3c6126b62d29f539a8712c65c58afc9c9d2c91 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 9 Jan 2018 09:52:18 +0200 Subject: [PATCH 76/96] mmc: sdhci-pci: Stop calling sdhci_enable_irq_wakeups() sdhci_enable_irq_wakeups() is already called by sdhci_suspend_host() so sdhci-pci should not need to call it. 
However sdhci_suspend_host() only calls it if wakeups are enabled, and sdhci-pci does not enable them until after calling sdhci_suspend_host(). So move the calls to sdhci_pci_init_wakeup() before calling sdhci_suspend_host(), and stop calling sdhci_enable_irq_wakeups(). That results in some simplification because sdhci_pci_suspend_host() and __sdhci_pci_suspend_host() no longer need to be separate functions. Signed-off-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-pci-core.c | 64 ++++++++++++------------------- 1 file changed, 24 insertions(+), 40 deletions(-) diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 00fa7a36b336..b99a970645e7 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -38,38 +38,6 @@ static void sdhci_pci_hw_reset(struct sdhci_host *host); #ifdef CONFIG_PM_SLEEP -static int __sdhci_pci_suspend_host(struct sdhci_pci_chip *chip) -{ - int i, ret; - - for (i = 0; i < chip->num_slots; i++) { - struct sdhci_pci_slot *slot = chip->slots[i]; - struct sdhci_host *host; - - if (!slot) - continue; - - host = slot->host; - - if (chip->pm_retune && host->tuning_mode != SDHCI_TUNING_MODE_3) - mmc_retune_needed(host->mmc); - - ret = sdhci_suspend_host(host); - if (ret) - goto err_pci_suspend; - - if (host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ) - sdhci_enable_irq_wakeups(host); - } - - return 0; - -err_pci_suspend: - while (--i >= 0) - sdhci_resume_host(chip->slots[i]->host); - return ret; -} - static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip) { mmc_pm_flag_t pm_flags = 0; @@ -89,15 +57,33 @@ static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip) static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip) { - int ret; - - ret = __sdhci_pci_suspend_host(chip); - if (ret) - return ret; + int i, ret; sdhci_pci_init_wakeup(chip); + for (i = 0; i < chip->num_slots; i++) { + struct sdhci_pci_slot *slot = chip->slots[i]; + struct sdhci_host *host; + + if (!slot) + continue; + + host = slot->host; + + if (chip->pm_retune && host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + + ret = sdhci_suspend_host(host); + if (ret) + goto err_pci_suspend; + } + return 0; + +err_pci_suspend: + while (--i >= 0) + sdhci_resume_host(chip->slots[i]->host); + return ret; } int sdhci_pci_resume_host(struct sdhci_pci_chip *chip) @@ -1109,7 +1095,7 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip) { int i, ret; - ret = __sdhci_pci_suspend_host(chip); + ret = sdhci_pci_suspend_host(chip); if (ret) return ret; @@ -1119,8 +1105,6 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip) jmicron_enable_mmc(chip->slots[i]->host, 0); } - sdhci_pci_init_wakeup(chip); - return 0; } From e92cc35d627f13c85a3662949ddec79345498e33 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 9 Jan 2018 09:52:19 +0200 Subject: [PATCH 77/96] mmc: sdhci-pci: Use device wakeup capability to determine MMC_PM_WAKE_SDIO_IRQ capability PCI and ACPI determine if a device is wakeup capable, so use that to determine the MMC_PM_WAKE_SDIO_IRQ capability correctly. 
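Sketch of the probe-time logic this introduces (simplified from the hunk below): SDIO IRQ wakeup is only advertised when the PCI/ACPI layer reports the device as wakeup capable.

    host->mmc->pm_caps = MMC_PM_KEEP_POWER;
    if (device_can_wakeup(&pdev->dev))
            host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;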
Signed-off-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-pci-core.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index b99a970645e7..6d1a983e6227 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -50,9 +50,9 @@ static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip) pm_flags |= slot->host->mmc->pm_flags; } - return device_init_wakeup(&chip->pdev->dev, - (pm_flags & MMC_PM_KEEP_POWER) && - (pm_flags & MMC_PM_WAKE_SDIO_IRQ)); + return device_set_wakeup_enable(&chip->pdev->dev, + (pm_flags & MMC_PM_KEEP_POWER) && + (pm_flags & MMC_PM_WAKE_SDIO_IRQ)); } static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip) @@ -1682,10 +1682,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( } } - host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ; + host->mmc->pm_caps = MMC_PM_KEEP_POWER; host->mmc->slotno = slotno; host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP; + if (device_can_wakeup(&pdev->dev)) + host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; + if (slot->cd_idx >= 0) { ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx, slot->cd_override_level, 0, NULL); From 551d6bde462932e8a024d89e7325f7c6e073500b Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 9 Jan 2018 09:52:20 +0200 Subject: [PATCH 78/96] mmc: sdhci: Stop exporting sdhci_enable_irq_wakeups() Now that it is not being used by any drivers, stop exporting it. Signed-off-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci.c | 3 +-- drivers/mmc/host/sdhci.h | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 80b1a59bc3c5..6ac4bdd7715d 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2828,7 +2828,7 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) * sdhci_disable_irq_wakeups() since it will be set by * sdhci_enable_card_detection() or sdhci_init(). */ -void sdhci_enable_irq_wakeups(struct sdhci_host *host) +static void sdhci_enable_irq_wakeups(struct sdhci_host *host) { u8 val; u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE @@ -2846,7 +2846,6 @@ void sdhci_enable_irq_wakeups(struct sdhci_host *host) sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); } -EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); static void sdhci_disable_irq_wakeups(struct sdhci_host *host) { diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 54bc444c317f..7393b3a54772 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -718,7 +718,6 @@ void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable); #ifdef CONFIG_PM int sdhci_suspend_host(struct sdhci_host *host); int sdhci_resume_host(struct sdhci_host *host); -void sdhci_enable_irq_wakeups(struct sdhci_host *host); int sdhci_runtime_suspend_host(struct sdhci_host *host); int sdhci_runtime_resume_host(struct sdhci_host *host); #endif From 58e79b60751d1ea6d13e09c6095f5e4cd5e040ee Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 9 Jan 2018 09:52:21 +0200 Subject: [PATCH 79/96] mmc: sdhci: Handle failure of enable_irq_wake() Now that sdhci_enable_irq_wakeups() is a local function, change it to return whether the IRQ wakeup was successfully enabled. This is in preparation for adding more conditions for whether IRQ wakeup is enabled. 
Note it is assumed, for SDHCI devices, that suspend is more important than wakeup, so we continue to suspend regardless. Signed-off-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci.c | 24 +++++++++++++++--------- drivers/mmc/host/sdhci.h | 1 + 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 6ac4bdd7715d..cc9776d9e8f4 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2828,7 +2828,7 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) * sdhci_disable_irq_wakeups() since it will be set by * sdhci_enable_card_detection() or sdhci_init(). */ -static void sdhci_enable_irq_wakeups(struct sdhci_host *host) +static bool sdhci_enable_irq_wakeups(struct sdhci_host *host) { u8 val; u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE @@ -2845,6 +2845,10 @@ static void sdhci_enable_irq_wakeups(struct sdhci_host *host) } sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); + + host->irq_wake_enabled = !enable_irq_wake(host->irq); + + return host->irq_wake_enabled; } static void sdhci_disable_irq_wakeups(struct sdhci_host *host) @@ -2856,6 +2860,10 @@ static void sdhci_disable_irq_wakeups(struct sdhci_host *host) val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); val &= ~mask; sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); + + disable_irq_wake(host->irq); + + host->irq_wake_enabled = false; } int sdhci_suspend_host(struct sdhci_host *host) @@ -2864,15 +2872,14 @@ int sdhci_suspend_host(struct sdhci_host *host) mmc_retune_timer_stop(host->mmc); - if (!device_may_wakeup(mmc_dev(host->mmc))) { + if (!device_may_wakeup(mmc_dev(host->mmc)) || + !sdhci_enable_irq_wakeups(host)) { host->ier = 0; sdhci_writel(host, 0, SDHCI_INT_ENABLE); sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); free_irq(host->irq, host); - } else { - sdhci_enable_irq_wakeups(host); - enable_irq_wake(host->irq); } + return 0; } @@ -2900,15 +2907,14 @@ int sdhci_resume_host(struct sdhci_host *host) mmiowb(); } - if (!device_may_wakeup(mmc_dev(host->mmc))) { + if (host->irq_wake_enabled) { + sdhci_disable_irq_wakeups(host); + } else { ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, IRQF_SHARED, mmc_hostname(host->mmc), host); if (ret) return ret; - } else { - sdhci_disable_irq_wakeups(host); - disable_irq_wake(host->irq); } sdhci_enable_card_detection(host); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 7393b3a54772..afab26fd70e6 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -484,6 +484,7 @@ struct sdhci_host { bool bus_on; /* Bus power prevents runtime suspend */ bool preset_enabled; /* Preset is enabled */ bool pending_reset; /* Cmd/data reset is pending */ + bool irq_wake_enabled; /* IRQ wakeup is enabled */ struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */ struct mmc_command *cmd; /* Current command */ From 81b14543ac81d529d6c07ce3be0cdfc9fe417389 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 9 Jan 2018 09:52:22 +0200 Subject: [PATCH 80/96] mmc: sdhci: Rework sdhci_enable_irq_wakeups() In preparation for adding more conditions for whether IRQ wakeup is enabled, rework sdhci_enable_irq_wakeups() so that needed bits are added instead of adding them all and then removing the unneeded bits. 
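The difference between the two styles, reduced to a sketch (values taken from the hunk below): rather than writing every wakeup bit and clearing the unwanted ones afterwards, start from zero and add only the bits that apply, which keeps further conditions easy to bolt on later.

    u8 wake_val = 0;
    u32 irq_val = 0;

    if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)) {
            wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
            irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
    }

    wake_val |= SDHCI_WAKE_ON_INT;
    irq_val |= SDHCI_INT_CARD_INT;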
Signed-off-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index cc9776d9e8f4..070aff9c108f 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2830,20 +2830,25 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) */ static bool sdhci_enable_irq_wakeups(struct sdhci_host *host) { + u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | + SDHCI_WAKE_ON_INT; + u32 irq_val = 0; + u8 wake_val = 0; u8 val; - u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE - | SDHCI_WAKE_ON_INT; - u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | - SDHCI_INT_CARD_INT; + + if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)) { + wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE; + irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE; + } + + wake_val |= SDHCI_WAKE_ON_INT; + irq_val |= SDHCI_INT_CARD_INT; val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); - val |= mask ; - /* Avoid fake wake up */ - if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) { - val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE); - irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE); - } + val &= ~mask; + val |= wake_val; sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); + sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); host->irq_wake_enabled = !enable_irq_wake(host->irq); From 659032dcb9f11c3bd2a3a23db76e6a70b3ddec79 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 17 Jan 2018 13:41:57 +0000 Subject: [PATCH 81/96] mmc: sh_mmcif: remove redundant initialization of 'opc' Variable opc is initialized with a value that is never read, opc is later re-assigned a newer value, hence the initialization can be removed. Cleans up clang warning: drivers/mmc/host/sh_mmcif.c:919:6: warning: Value stored to 'opc' during its initialization is never read Signed-off-by: Colin Ian King Signed-off-by: Ulf Hansson --- drivers/mmc/host/sh_mmcif.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 53fb18bb7bee..7bb00c68a756 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -916,7 +916,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, struct mmc_request *mrq) { struct mmc_command *cmd = mrq->cmd; - u32 opc = cmd->opcode; + u32 opc; u32 mask = 0; unsigned long flags; From 8d09a13386ccdee8fb6d66aa2cfedbbc9255f892 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 18 Jan 2018 01:28:01 +0900 Subject: [PATCH 82/96] mmc: tmio: ioremap memory resource in tmio_mmc_host_alloc() The register region is ioremap'ed in the tmio_mmc_host_probe(), i.e. drivers cannot get access to the hardware before mmc_add_host(). Actually, renesas_sdhi_core.c reads out the CTL_VERSION register to complete the platform-specific settings. However, at this point, the MMC host is already running. Move the register ioremap to tmio_mmc_host_alloc() so that drivers can perform platform-specific settings between tmio_mmc_host_alloc() and tmio_mmc_host_probe(). I changed tmio_mmc_host_alloc() to return an error pointer to propagate the return code from devm_ioremap_resource(). 
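For reference, the error-pointer idiom used here, reduced to its essentials (simplified; the full functions appear in the diff below):

    /* callee: encode the errno in the returned pointer */
    ctl = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(ctl))
            return ERR_CAST(ctl);

    mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
    if (!mmc)
            return ERR_PTR(-ENOMEM);

    /* caller: decode it again */
    host = tmio_mmc_host_alloc(pdev);
    if (IS_ERR(host))
            return PTR_ERR(host);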
Signed-off-by: Masahiro Yamada Reviewed-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/renesas_sdhi_core.c | 4 ++-- drivers/mmc/host/tmio_mmc.c | 4 +++- drivers/mmc/host/tmio_mmc_core.c | 16 +++++++++------- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 6a2988bd51a2..ccdde2735f68 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -512,8 +512,8 @@ int renesas_sdhi_probe(struct platform_device *pdev, } host = tmio_mmc_host_alloc(pdev); - if (!host) - return -ENOMEM; + if (IS_ERR(host)) + return PTR_ERR(host); if (of_data) { mmc_data->flags |= of_data->tmio_flags; diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index ccfbc154ee5b..d660816bdf89 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -93,8 +93,10 @@ static int tmio_mmc_probe(struct platform_device *pdev) pdata->flags |= TMIO_MMC_HAVE_HIGH_REG; host = tmio_mmc_host_alloc(pdev); - if (!host) + if (IS_ERR(host)) { + ret = PTR_ERR(host); goto cell_disable; + } /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ host->bus_shift = resource_size(res) >> 10; diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 0929b987fb29..4f62ce6664e0 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1150,12 +1150,20 @@ tmio_mmc_host_alloc(struct platform_device *pdev) { struct tmio_mmc_host *host; struct mmc_host *mmc; + struct resource *res; + void __iomem *ctl; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ctl = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ctl)) + return ERR_CAST(ctl); mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev); if (!mmc) - return NULL; + return ERR_PTR(-ENOMEM); host = mmc_priv(mmc); + host->ctl = ctl; host->mmc = mmc; host->pdev = pdev; host->ops = tmio_mmc_ops; @@ -1177,7 +1185,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, { struct platform_device *pdev = _host->pdev; struct mmc_host *mmc = _host->mmc; - struct resource *res_ctl; int ret; u32 irq_mask = TMIO_MASK_CMD; @@ -1186,11 +1193,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT)) _host->write16_hook = NULL; - res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0); - _host->ctl = devm_ioremap_resource(&pdev->dev, res_ctl); - if (IS_ERR(_host->ctl)) - return PTR_ERR(_host->ctl); - ret = mmc_of_parse(mmc); if (ret < 0) return ret; From b21fc294387e4cf7916c132f7d6aaeebd4483a16 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 18 Jan 2018 01:28:02 +0900 Subject: [PATCH 83/96] mmc: tmio: move clk_enable/disable out of tmio_mmc_host_probe() The clock is enabled in the tmio_mmc_host_probe(). It also prevents drivers from performing platform-specific settings before mmc_add_host() because the register access generally requires a clock. Enable/disable the clock in drivers' probe/remove. Also, I passed tmio_mmc_data to tmio_mmc_host_alloc() because renesas_sdhi_clk_enable() needs it to get the private data from tmio_mmc_host. 
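The resulting probe sequence in a caller such as renesas_sdhi_probe(), sketched with the error unwinding collapsed (the real hunks follow below):

    host = tmio_mmc_host_alloc(pdev, mmc_data);
    if (IS_ERR(host))
            return PTR_ERR(host);

    ret = renesas_sdhi_clk_enable(host);    /* clock now owned by the driver */
    if (ret)
            goto efree;

    /* platform-specific register setup may go here, the clock is running */

    ret = tmio_mmc_host_probe(host, dma_ops);
    if (ret < 0)
            goto edisclk;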
Signed-off-by: Masahiro Yamada Reviewed-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/renesas_sdhi_core.c | 13 ++++++++--- drivers/mmc/host/tmio_mmc.c | 7 ++++-- drivers/mmc/host/tmio_mmc.h | 4 ++-- drivers/mmc/host/tmio_mmc_core.c | 33 +++++++++++----------------- 4 files changed, 30 insertions(+), 27 deletions(-) diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index ccdde2735f68..e18a1c553df6 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -511,7 +511,7 @@ int renesas_sdhi_probe(struct platform_device *pdev, "state_uhs"); } - host = tmio_mmc_host_alloc(pdev); + host = tmio_mmc_host_alloc(pdev, mmc_data); if (IS_ERR(host)) return PTR_ERR(host); @@ -571,10 +571,14 @@ int renesas_sdhi_probe(struct platform_device *pdev, /* All SDHI have SDIO status bits which must be 1 */ mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS; - ret = tmio_mmc_host_probe(host, mmc_data, dma_ops); - if (ret < 0) + ret = renesas_sdhi_clk_enable(host); + if (ret) goto efree; + ret = tmio_mmc_host_probe(host, dma_ops); + if (ret < 0) + goto edisclk; + /* One Gen2 SDHI incarnation does NOT have a CBSY bit */ if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50) mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY; @@ -635,6 +639,8 @@ int renesas_sdhi_probe(struct platform_device *pdev, eirq: tmio_mmc_host_remove(host); +edisclk: + renesas_sdhi_clk_disable(host); efree: tmio_mmc_host_free(host); @@ -647,6 +653,7 @@ int renesas_sdhi_remove(struct platform_device *pdev) struct tmio_mmc_host *host = platform_get_drvdata(pdev); tmio_mmc_host_remove(host); + renesas_sdhi_clk_disable(host); return 0; } diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index d660816bdf89..11b87ce54764 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -92,7 +92,7 @@ static int tmio_mmc_probe(struct platform_device *pdev) pdata->flags |= TMIO_MMC_HAVE_HIGH_REG; - host = tmio_mmc_host_alloc(pdev); + host = tmio_mmc_host_alloc(pdev, pdata); if (IS_ERR(host)) { ret = PTR_ERR(host); goto cell_disable; @@ -101,7 +101,10 @@ static int tmio_mmc_probe(struct platform_device *pdev) /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ host->bus_shift = resource_size(res) >> 10; - ret = tmio_mmc_host_probe(host, pdata, NULL); + host->mmc->f_max = pdata->hclk; + host->mmc->f_min = pdata->hclk / 512; + + ret = tmio_mmc_host_probe(host, NULL); if (ret) goto host_free; diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 52198f2929a5..b52d7368818d 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -195,10 +195,10 @@ struct tmio_mmc_host { const struct tmio_mmc_dma_ops *dma_ops; }; -struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev); +struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev, + struct tmio_mmc_data *pdata); void tmio_mmc_host_free(struct tmio_mmc_host *host); int tmio_mmc_host_probe(struct tmio_mmc_host *host, - struct tmio_mmc_data *pdata, const struct tmio_mmc_dma_ops *dma_ops); void tmio_mmc_host_remove(struct tmio_mmc_host *host); void tmio_mmc_do_data_irq(struct tmio_mmc_host *host); diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 4f62ce6664e0..d2790ff18294 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1145,8 +1145,8 @@ static void tmio_mmc_of_parse(struct platform_device *pdev, pdata->flags |= 
TMIO_MMC_WRPROTECT_DISABLE; } -struct tmio_mmc_host* -tmio_mmc_host_alloc(struct platform_device *pdev) +struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev, + struct tmio_mmc_data *pdata) { struct tmio_mmc_host *host; struct mmc_host *mmc; @@ -1166,9 +1166,12 @@ tmio_mmc_host_alloc(struct platform_device *pdev) host->ctl = ctl; host->mmc = mmc; host->pdev = pdev; + host->pdata = pdata; host->ops = tmio_mmc_ops; mmc->ops = &host->ops; + platform_set_drvdata(pdev, host); + return host; } EXPORT_SYMBOL_GPL(tmio_mmc_host_alloc); @@ -1180,14 +1183,21 @@ void tmio_mmc_host_free(struct tmio_mmc_host *host) EXPORT_SYMBOL_GPL(tmio_mmc_host_free); int tmio_mmc_host_probe(struct tmio_mmc_host *_host, - struct tmio_mmc_data *pdata, const struct tmio_mmc_dma_ops *dma_ops) { struct platform_device *pdev = _host->pdev; + struct tmio_mmc_data *pdata = _host->pdata; struct mmc_host *mmc = _host->mmc; int ret; u32 irq_mask = TMIO_MASK_CMD; + /* + * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from + * looping forever... + */ + if (mmc->f_min == 0) + return -EINVAL; + tmio_mmc_of_parse(pdev, pdata); if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT)) @@ -1197,9 +1207,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, if (ret < 0) return ret; - _host->pdata = pdata; - platform_set_drvdata(pdev, _host); - _host->set_pwr = pdata->set_pwr; _host->set_clk_div = pdata->set_clk_div; @@ -1247,18 +1254,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, if (pdata->flags & TMIO_MMC_MIN_RCAR2) _host->native_hotplug = true; - if (tmio_mmc_clk_enable(_host) < 0) { - mmc->f_max = pdata->hclk; - mmc->f_min = mmc->f_max / 512; - } - - /* - * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from - * looping forever... - */ - if (mmc->f_min == 0) - return -EINVAL; - /* * While using internal tmio hardware logic for card detection, we need * to ensure it stays powered for it to work. @@ -1336,8 +1331,6 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host) pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - - tmio_mmc_clk_disable(host); } EXPORT_SYMBOL_GPL(tmio_mmc_host_remove); From 6fb294f791af8f491812d4eef6b13a57c9c1de34 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 18 Jan 2018 01:28:03 +0900 Subject: [PATCH 84/96] mmc: tmio: move {tmio_}mmc_of_parse() to tmio_mmc_host_alloc() mmc_of_parse() parses various DT properties and sets capability flags accordingly. However, drivers have no chance to run platform init code depending on such flags because mmc_of_parse() is called from tmio_mmc_host_probe(). Move mmc_of_parse() to tmio_mmc_host_alloc() so that drivers can handle capabilities before mmc_add_host(). Move tmio_mmc_of_parse() likewise. 
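With the parsing moved, a driver can react to DT-provided capabilities between allocation and registration. A hypothetical example (the capability check shown is illustrative only, not taken from an existing driver):

    host = tmio_mmc_host_alloc(pdev, pdata);
    if (IS_ERR(host))
            return PTR_ERR(host);

    /* mmc_of_parse() has already run, so DT capabilities are visible here */
    if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
            dev_dbg(&pdev->dev, "8-bit bus requested via DT\n");

    /* ... then tmio_mmc_host_probe() as before ... */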
Signed-off-by: Masahiro Yamada Reviewed-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/tmio_mmc_core.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index d2790ff18294..b096b990ab10 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1152,6 +1152,7 @@ struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev, struct mmc_host *mmc; struct resource *res; void __iomem *ctl; + int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ctl = devm_ioremap_resource(&pdev->dev, res); @@ -1170,8 +1171,20 @@ struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev, host->ops = tmio_mmc_ops; mmc->ops = &host->ops; + ret = mmc_of_parse(host->mmc); + if (ret) { + host = ERR_PTR(ret); + goto free; + } + + tmio_mmc_of_parse(pdev, pdata); + platform_set_drvdata(pdev, host); + return host; +free: + mmc_free_host(mmc); + return host; } EXPORT_SYMBOL_GPL(tmio_mmc_host_alloc); @@ -1198,15 +1211,9 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, if (mmc->f_min == 0) return -EINVAL; - tmio_mmc_of_parse(pdev, pdata); - if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT)) _host->write16_hook = NULL; - ret = mmc_of_parse(mmc); - if (ret < 0) - return ret; - _host->set_pwr = pdata->set_pwr; _host->set_clk_div = pdata->set_clk_div; From bc45719c1b1a56047246d44c7e4ed88a8ae702c1 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 18 Jan 2018 01:28:04 +0900 Subject: [PATCH 85/96] mmc: tmio: remove dma_ops from tmio_mmc_host_probe() argument Drivers need to set up various struct members for tmio_mmc_host before calling tmio_mmc_host_probe(). Do likewise for host->dma_ops instead of passing it as a function argument. 
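The call-site change in short (taken from the renesas_sdhi hunk below): the DMA ops become one more member the driver fills in before probing, consistent with clk_update, clk_disable and the other hooks.

    host->dma_ops = dma_ops;
    /* ... other hooks ... */
    ret = tmio_mmc_host_probe(host);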
Signed-off-by: Masahiro Yamada Reviewed-by: Wolfram Sang Signed-off-by: Ulf Hansson --- drivers/mmc/host/renesas_sdhi_core.c | 3 ++- drivers/mmc/host/tmio_mmc.c | 2 +- drivers/mmc/host/tmio_mmc.h | 3 +-- drivers/mmc/host/tmio_mmc_core.c | 4 +--- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index e18a1c553df6..80943fa07db6 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -532,6 +532,7 @@ int renesas_sdhi_probe(struct platform_device *pdev, host->clk_update = renesas_sdhi_clk_update; host->clk_disable = renesas_sdhi_clk_disable; host->multi_io_quirk = renesas_sdhi_multi_io_quirk; + host->dma_ops = dma_ops; /* SDR speeds are only available on Gen2+ */ if (mmc_data->flags & TMIO_MMC_MIN_RCAR2) { @@ -575,7 +576,7 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (ret) goto efree; - ret = tmio_mmc_host_probe(host, dma_ops); + ret = tmio_mmc_host_probe(host); if (ret < 0) goto edisclk; diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index 11b87ce54764..43a2ea5cff24 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -104,7 +104,7 @@ static int tmio_mmc_probe(struct platform_device *pdev) host->mmc->f_max = pdata->hclk; host->mmc->f_min = pdata->hclk / 512; - ret = tmio_mmc_host_probe(host, NULL); + ret = tmio_mmc_host_probe(host); if (ret) goto host_free; diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index b52d7368818d..e7d651352dc9 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -198,8 +198,7 @@ struct tmio_mmc_host { struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev, struct tmio_mmc_data *pdata); void tmio_mmc_host_free(struct tmio_mmc_host *host); -int tmio_mmc_host_probe(struct tmio_mmc_host *host, - const struct tmio_mmc_dma_ops *dma_ops); +int tmio_mmc_host_probe(struct tmio_mmc_host *host); void tmio_mmc_host_remove(struct tmio_mmc_host *host); void tmio_mmc_do_data_irq(struct tmio_mmc_host *host); diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index b096b990ab10..7d8eec24f0ed 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1195,8 +1195,7 @@ void tmio_mmc_host_free(struct tmio_mmc_host *host) } EXPORT_SYMBOL_GPL(tmio_mmc_host_free); -int tmio_mmc_host_probe(struct tmio_mmc_host *_host, - const struct tmio_mmc_dma_ops *dma_ops) +int tmio_mmc_host_probe(struct tmio_mmc_host *_host) { struct platform_device *pdev = _host->pdev; struct tmio_mmc_data *pdata = _host->pdata; @@ -1296,7 +1295,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, INIT_WORK(&_host->done, tmio_mmc_done_work); /* See if we also get DMA */ - _host->dma_ops = dma_ops; tmio_mmc_request_dma(_host, pdata); pm_runtime_set_active(&pdev->dev); From 85f9ef8cdfb463e6e8ff9fe8cdcc0aed438b526e Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 18 Jan 2018 01:28:05 +0900 Subject: [PATCH 86/96] mmc: slot-gpio: add a helper to check capability of GPIO WP detection Like mmc_can_gpio_cd(), mmc_can_gpio_ro() will also be useful for host drivers to know whether GPIO write-protect detection is supported. 
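A typical use in a host driver (this is what the following tmio patch does): install the generic GPIO read-only callback only when a write-protect GPIO is actually wired up.

    if (mmc_can_gpio_ro(mmc))
            _host->ops.get_ro = mmc_gpio_get_ro;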
Signed-off-by: Masahiro Yamada Signed-off-by: Ulf Hansson --- drivers/mmc/core/slot-gpio.c | 8 ++++++++ include/linux/mmc/slot-gpio.h | 1 + 2 files changed, 9 insertions(+) diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index f7c6e0542de7..3698b0576009 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -305,3 +305,11 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, return 0; } EXPORT_SYMBOL(mmc_gpiod_request_ro); + +bool mmc_can_gpio_ro(struct mmc_host *host) +{ + struct mmc_gpio *ctx = host->slot.handler_priv; + + return ctx->ro_gpio ? true : false; +} +EXPORT_SYMBOL(mmc_can_gpio_ro); diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h index 82f0d289f110..91f1ba0663c8 100644 --- a/include/linux/mmc/slot-gpio.h +++ b/include/linux/mmc/slot-gpio.h @@ -33,5 +33,6 @@ void mmc_gpio_set_cd_isr(struct mmc_host *host, irqreturn_t (*isr)(int irq, void *dev_id)); void mmc_gpiod_request_cd_irq(struct mmc_host *host); bool mmc_can_gpio_cd(struct mmc_host *host); +bool mmc_can_gpio_ro(struct mmc_host *host); #endif From 1910b87f7a9e6d9f9085d36e45dce1e5547c692d Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 18 Jan 2018 01:28:06 +0900 Subject: [PATCH 87/96] mmc: tmio: refactor .get_ro hook This IP provides the write protect signal level in the status register, but it is also possible to use GPIO for WP. They are exclusive, so it is not efficient to call mmc_gpio_get_ro() every time from tmio_mmc_get_ro() if we know gpio_ro is not used. Check the capability of gpio_ro just once in the probe function, then set mmc_gpio_get_ro to .get_ro if it is the case. Signed-off-by: Masahiro Yamada Signed-off-by: Ulf Hansson --- drivers/mmc/host/tmio_mmc_core.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 7d8eec24f0ed..6d8719be75a8 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1076,15 +1076,9 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc) { struct tmio_mmc_host *host = mmc_priv(mmc); struct tmio_mmc_data *pdata = host->pdata; - int ret = mmc_gpio_get_ro(mmc); - if (ret >= 0) - return ret; - - ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || - (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); - - return ret; + return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || + (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); } static int tmio_multi_io_quirk(struct mmc_card *card, @@ -1247,6 +1241,9 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) } mmc->max_seg_size = mmc->max_req_size; + if (mmc_can_gpio_ro(mmc)) + _host->ops.get_ro = mmc_gpio_get_ro; + _host->native_hotplug = !(mmc_can_gpio_cd(mmc) || mmc->caps & MMC_CAP_NEEDS_POLL || !mmc_card_is_removable(mmc)); From 6ea9cdf3912a8ca532dde5296a0e81fc75a40fa3 Mon Sep 17 00:00:00 2001 From: Patrice Chotard Date: Thu, 18 Jan 2018 15:34:17 +0100 Subject: [PATCH 88/96] mmc: mmci: Don't pretend all variants to have MMCIMASK1 register Two mask registers are used in order to select which events have to actually generate an interrupt on each IRQ line. It seems that in the single-IRQ case it's assumed that the IRQs lines are simply OR-ed, while the two mask registers are still present. The driver still programs the two mask registers separately. However the STM32 variant has only one IRQ, and also has only one mask register. 
This patch prepares for STM32 variant support by making the driver using only one mask register. This patch also optimize the MMCIMASK1 mask usage by caching it into host->mask1_reg which avoid to read it into mmci_irq(). Tested only on STM32 variant. RFT for variants other than STM32 Signed-off-by: Andrea Merello Signed-off-by: Patrice Chotard Reviewed-by: Linus Walleij Signed-off-by: Ulf Hansson --- drivers/mmc/host/mmci.c | 28 ++++++++++++++++++++++++---- drivers/mmc/host/mmci.h | 1 + 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index e8a1bb1ae694..bc7669d50c38 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -82,6 +82,7 @@ static unsigned int fmax = 515633; * @qcom_fifo: enables qcom specific fifo pio read logic. * @qcom_dml: enables qcom specific dma glue for dma transfers. * @reversed_irq_handling: handle data irq before cmd irq. + * @mmcimask1: true if variant have a MMCIMASK1 register. */ struct variant_data { unsigned int clkreg; @@ -111,6 +112,7 @@ struct variant_data { bool qcom_fifo; bool qcom_dml; bool reversed_irq_handling; + bool mmcimask1; }; static struct variant_data variant_arm = { @@ -120,6 +122,7 @@ static struct variant_data variant_arm = { .pwrreg_powerup = MCI_PWR_UP, .f_max = 100000000, .reversed_irq_handling = true, + .mmcimask1 = true, }; static struct variant_data variant_arm_extended_fifo = { @@ -128,6 +131,7 @@ static struct variant_data variant_arm_extended_fifo = { .datalength_bits = 16, .pwrreg_powerup = MCI_PWR_UP, .f_max = 100000000, + .mmcimask1 = true, }; static struct variant_data variant_arm_extended_fifo_hwfc = { @@ -137,6 +141,7 @@ static struct variant_data variant_arm_extended_fifo_hwfc = { .datalength_bits = 16, .pwrreg_powerup = MCI_PWR_UP, .f_max = 100000000, + .mmcimask1 = true, }; static struct variant_data variant_u300 = { @@ -152,6 +157,7 @@ static struct variant_data variant_u300 = { .signal_direction = true, .pwrreg_clkgate = true, .pwrreg_nopower = true, + .mmcimask1 = true, }; static struct variant_data variant_nomadik = { @@ -168,6 +174,7 @@ static struct variant_data variant_nomadik = { .signal_direction = true, .pwrreg_clkgate = true, .pwrreg_nopower = true, + .mmcimask1 = true, }; static struct variant_data variant_ux500 = { @@ -190,6 +197,7 @@ static struct variant_data variant_ux500 = { .busy_detect_flag = MCI_ST_CARDBUSY, .busy_detect_mask = MCI_ST_BUSYENDMASK, .pwrreg_nopower = true, + .mmcimask1 = true, }; static struct variant_data variant_ux500v2 = { @@ -214,6 +222,7 @@ static struct variant_data variant_ux500v2 = { .busy_detect_flag = MCI_ST_CARDBUSY, .busy_detect_mask = MCI_ST_BUSYENDMASK, .pwrreg_nopower = true, + .mmcimask1 = true, }; static struct variant_data variant_qcom = { @@ -232,6 +241,7 @@ static struct variant_data variant_qcom = { .explicit_mclk_control = true, .qcom_fifo = true, .qcom_dml = true, + .mmcimask1 = true, }; /* Busy detection for the ST Micro variant */ @@ -396,6 +406,7 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) static void mmci_set_mask1(struct mmci_host *host, unsigned int mask) { void __iomem *base = host->base; + struct variant_data *variant = host->variant; if (host->singleirq) { unsigned int mask0 = readl(base + MMCIMASK0); @@ -406,7 +417,10 @@ static void mmci_set_mask1(struct mmci_host *host, unsigned int mask) writel(mask0, base + MMCIMASK0); } - writel(mask, base + MMCIMASK1); + if (variant->mmcimask1) + writel(mask, base + MMCIMASK1); + + host->mask1_reg = mask; } static void 
mmci_stop_data(struct mmci_host *host) @@ -1286,7 +1300,7 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) status = readl(host->base + MMCISTATUS); if (host->singleirq) { - if (status & readl(host->base + MMCIMASK1)) + if (status & host->mask1_reg) mmci_pio_irq(irq, dev_id); status &= ~MCI_IRQ1MASK; @@ -1729,7 +1743,10 @@ static int mmci_probe(struct amba_device *dev, spin_lock_init(&host->lock); writel(0, host->base + MMCIMASK0); - writel(0, host->base + MMCIMASK1); + + if (variant->mmcimask1) + writel(0, host->base + MMCIMASK1); + writel(0xfff, host->base + MMCICLEAR); /* @@ -1809,6 +1826,7 @@ static int mmci_remove(struct amba_device *dev) if (mmc) { struct mmci_host *host = mmc_priv(mmc); + struct variant_data *variant = host->variant; /* * Undo pm_runtime_put() in probe. We use the _sync @@ -1819,7 +1837,9 @@ static int mmci_remove(struct amba_device *dev) mmc_remove_host(mmc); writel(0, host->base + MMCIMASK0); - writel(0, host->base + MMCIMASK1); + + if (variant->mmcimask1) + writel(0, host->base + MMCIMASK1); writel(0, host->base + MMCICOMMAND); writel(0, host->base + MMCIDATACTRL); diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 4a8bef1aac8f..83160a9c4c77 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -223,6 +223,7 @@ struct mmci_host { u32 clk_reg; u32 datactrl_reg; u32 busy_status; + u32 mask1_reg; bool vqmmc_enabled; struct mmci_platform_data *plat; struct variant_data *variant; From 7f7b55036c567cffbb2cea4a35a971587240e6bd Mon Sep 17 00:00:00 2001 From: Patrice Chotard Date: Thu, 18 Jan 2018 15:34:18 +0100 Subject: [PATCH 89/96] mmc: mmci: Don't pretend all variants to have MCI_STARBITERR flag This patch prepares for supporting the STM32 variant that has no such bit in the status register. Signed-off-by: Andrea Merello Signed-off-by: Patrice Chotard Signed-off-by: Ulf Hansson --- drivers/mmc/host/mmci.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index bc7669d50c38..91a35b8dffc2 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -83,6 +83,8 @@ static unsigned int fmax = 515633; * @qcom_dml: enables qcom specific dma glue for dma transfers. * @reversed_irq_handling: handle data irq before cmd irq. * @mmcimask1: true if variant have a MMCIMASK1 register. + * @start_err: bitmask identifying the STARTBITERR bit inside MMCISTATUS + * register. 
*/ struct variant_data { unsigned int clkreg; @@ -113,6 +115,7 @@ struct variant_data { bool qcom_dml; bool reversed_irq_handling; bool mmcimask1; + u32 start_err; }; static struct variant_data variant_arm = { @@ -123,6 +126,7 @@ static struct variant_data variant_arm = { .f_max = 100000000, .reversed_irq_handling = true, .mmcimask1 = true, + .start_err = MCI_STARTBITERR, }; static struct variant_data variant_arm_extended_fifo = { @@ -132,6 +136,7 @@ static struct variant_data variant_arm_extended_fifo = { .pwrreg_powerup = MCI_PWR_UP, .f_max = 100000000, .mmcimask1 = true, + .start_err = MCI_STARTBITERR, }; static struct variant_data variant_arm_extended_fifo_hwfc = { @@ -142,6 +147,7 @@ static struct variant_data variant_arm_extended_fifo_hwfc = { .pwrreg_powerup = MCI_PWR_UP, .f_max = 100000000, .mmcimask1 = true, + .start_err = MCI_STARTBITERR, }; static struct variant_data variant_u300 = { @@ -158,6 +164,7 @@ static struct variant_data variant_u300 = { .pwrreg_clkgate = true, .pwrreg_nopower = true, .mmcimask1 = true, + .start_err = MCI_STARTBITERR, }; static struct variant_data variant_nomadik = { @@ -175,6 +182,7 @@ static struct variant_data variant_nomadik = { .pwrreg_clkgate = true, .pwrreg_nopower = true, .mmcimask1 = true, + .start_err = MCI_STARTBITERR, }; static struct variant_data variant_ux500 = { @@ -198,6 +206,7 @@ static struct variant_data variant_ux500 = { .busy_detect_mask = MCI_ST_BUSYENDMASK, .pwrreg_nopower = true, .mmcimask1 = true, + .start_err = MCI_STARTBITERR, }; static struct variant_data variant_ux500v2 = { @@ -223,6 +232,7 @@ static struct variant_data variant_ux500v2 = { .busy_detect_mask = MCI_ST_BUSYENDMASK, .pwrreg_nopower = true, .mmcimask1 = true, + .start_err = MCI_STARTBITERR, }; static struct variant_data variant_qcom = { @@ -242,6 +252,7 @@ static struct variant_data variant_qcom = { .qcom_fifo = true, .qcom_dml = true, .mmcimask1 = true, + .start_err = MCI_STARTBITERR, }; /* Busy detection for the ST Micro variant */ @@ -935,8 +946,9 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, return; /* First check for errors */ - if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| - MCI_TXUNDERRUN|MCI_RXOVERRUN)) { + if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT | + host->variant->start_err | + MCI_TXUNDERRUN | MCI_RXOVERRUN)) { u32 remain, success; /* Terminate the DMA transfer */ From 11dfb9701175ead45be9f6621619fc67598ed4ec Mon Sep 17 00:00:00 2001 From: Patrice Chotard Date: Thu, 18 Jan 2018 15:34:19 +0100 Subject: [PATCH 90/96] mmc: mmci: Don't pretend all variants to have OPENDRAIN bit This patch prepares for supporting STM32 variant which doesn't have opendrain bit in MMCIPOWER register. ST others variant (u300, nomadik and ux500) uses MCI_OD bit whereas others variants uses MCI_ROD bit. Signed-off-by: Patrice Chotard Signed-off-by: Ulf Hansson --- drivers/mmc/host/mmci.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 91a35b8dffc2..c1123f644959 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -85,6 +85,7 @@ static unsigned int fmax = 515633; * @mmcimask1: true if variant have a MMCIMASK1 register. * @start_err: bitmask identifying the STARTBITERR bit inside MMCISTATUS * register. 
+ * @opendrain: bitmask identifying the OPENDRAIN bit inside MMCIPOWER register */ struct variant_data { unsigned int clkreg; @@ -116,6 +117,7 @@ struct variant_data { bool reversed_irq_handling; bool mmcimask1; u32 start_err; + u32 opendrain; }; static struct variant_data variant_arm = { @@ -127,6 +129,7 @@ static struct variant_data variant_arm = { .reversed_irq_handling = true, .mmcimask1 = true, .start_err = MCI_STARTBITERR, + .opendrain = MCI_ROD, }; static struct variant_data variant_arm_extended_fifo = { @@ -137,6 +140,7 @@ static struct variant_data variant_arm_extended_fifo = { .f_max = 100000000, .mmcimask1 = true, .start_err = MCI_STARTBITERR, + .opendrain = MCI_ROD, }; static struct variant_data variant_arm_extended_fifo_hwfc = { @@ -148,6 +152,7 @@ static struct variant_data variant_arm_extended_fifo_hwfc = { .f_max = 100000000, .mmcimask1 = true, .start_err = MCI_STARTBITERR, + .opendrain = MCI_ROD, }; static struct variant_data variant_u300 = { @@ -165,6 +170,7 @@ static struct variant_data variant_u300 = { .pwrreg_nopower = true, .mmcimask1 = true, .start_err = MCI_STARTBITERR, + .opendrain = MCI_OD, }; static struct variant_data variant_nomadik = { @@ -183,6 +189,7 @@ static struct variant_data variant_nomadik = { .pwrreg_nopower = true, .mmcimask1 = true, .start_err = MCI_STARTBITERR, + .opendrain = MCI_OD, }; static struct variant_data variant_ux500 = { @@ -207,6 +214,7 @@ static struct variant_data variant_ux500 = { .pwrreg_nopower = true, .mmcimask1 = true, .start_err = MCI_STARTBITERR, + .opendrain = MCI_OD, }; static struct variant_data variant_ux500v2 = { @@ -233,6 +241,7 @@ static struct variant_data variant_ux500v2 = { .pwrreg_nopower = true, .mmcimask1 = true, .start_err = MCI_STARTBITERR, + .opendrain = MCI_OD, }; static struct variant_data variant_qcom = { @@ -253,6 +262,7 @@ static struct variant_data variant_qcom = { .qcom_dml = true, .mmcimask1 = true, .start_err = MCI_STARTBITERR, + .opendrain = MCI_ROD, }; /* Busy detection for the ST Micro variant */ @@ -1455,17 +1465,8 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) ~MCI_ST_DATA2DIREN); } - if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { - if (host->hw_designer != AMBA_VENDOR_ST) - pwr |= MCI_ROD; - else { - /* - * The ST Micro variant use the ROD bit for something - * else and only has OD (Open Drain). - */ - pwr |= MCI_OD; - } - } + if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN && variant->opendrain) + pwr |= variant->opendrain; /* * If clock = 0 and the variant requires the MMCIPOWER to be used for From f9bb304ce855fad615c5adffae5e129941ff0b48 Mon Sep 17 00:00:00 2001 From: Patrice Chotard Date: Thu, 18 Jan 2018 15:34:20 +0100 Subject: [PATCH 91/96] mmc: mmci: Add support for setting pad type via pinctrl If variant hasn't the control bit to switch pads in opendrain mode, we can achieve the same result by asking to the pinmux driver to configure pins for us. This patch make the mmci driver able to do this whenever needed. 
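Sketch of the pinctrl fallback (error handling omitted; the "opendrain" state name and the struct members match the diff below): the states are looked up once at probe time and switched from mmci_set_ios() whenever the bus mode changes.

    /* probe time */
    host->pinctrl = devm_pinctrl_get(&dev->dev);
    host->pins_default = pinctrl_lookup_state(host->pinctrl,
                                              PINCTRL_STATE_DEFAULT);
    host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
                                                MMCI_PINCTRL_STATE_OPENDRAIN);

    /* mmci_set_ios() */
    if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
            pinctrl_select_state(host->pinctrl, host->pins_opendrain);
    else
            pinctrl_select_state(host->pinctrl, host->pins_default);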
From f9bb304ce855fad615c5adffae5e129941ff0b48 Mon Sep 17 00:00:00 2001
From: Patrice Chotard
Date: Thu, 18 Jan 2018 15:34:20 +0100
Subject: [PATCH 91/96] mmc: mmci: Add support for setting pad type via pinctrl

If the variant doesn't have the control bit to switch the pads into
opendrain mode, we can achieve the same result by asking the pinmux
driver to configure the pins for us. This patch makes the mmci driver
able to do this whenever needed.

Signed-off-by: Andrea Merello
Signed-off-by: Patrice Chotard
Signed-off-by: Ulf Hansson
---
 drivers/mmc/host/mmci.c | 41 +++++++++++++++++++++++++++++++++++++++--
 drivers/mmc/host/mmci.h |  5 +++++
 2 files changed, 44 insertions(+), 2 deletions(-)

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index c1123f644959..f8a21f3c3b51 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1465,8 +1465,19 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                         ~MCI_ST_DATA2DIREN);
         }

-        if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN && variant->opendrain)
-                pwr |= variant->opendrain;
+        if (variant->opendrain) {
+                if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+                        pwr |= variant->opendrain;
+        } else {
+                /*
+                 * If the variant cannot configure the pads by its own, then we
+                 * expect the pinctrl to be able to do that for us
+                 */
+                if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+                        pinctrl_select_state(host->pinctrl, host->pins_opendrain);
+                else
+                        pinctrl_select_state(host->pinctrl, host->pins_default);
+        }

         /*
          * If clock = 0 and the variant requires the MMCIPOWER to be used for
@@ -1610,6 +1621,32 @@ static int mmci_probe(struct amba_device *dev,
         host = mmc_priv(mmc);
         host->mmc = mmc;

+        /*
+         * Some variant (STM32) doesn't have opendrain bit, nevertheless
+         * pins can be set accordingly using pinctrl
+         */
+        if (!variant->opendrain) {
+                host->pinctrl = devm_pinctrl_get(&dev->dev);
+                if (IS_ERR(host->pinctrl)) {
+                        dev_err(&dev->dev, "failed to get pinctrl");
+                        goto host_free;
+                }
+
+                host->pins_default = pinctrl_lookup_state(host->pinctrl,
+                                                          PINCTRL_STATE_DEFAULT);
+                if (IS_ERR(host->pins_default)) {
+                        dev_err(mmc_dev(mmc), "Can't select default pins\n");
+                        goto host_free;
+                }
+
+                host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
+                                                            MMCI_PINCTRL_STATE_OPENDRAIN);
+                if (IS_ERR(host->pins_opendrain)) {
+                        dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
+                        goto host_free;
+                }
+        }
+
         host->hw_designer = amba_manf(dev);
         host->hw_revision = amba_rev(dev);
         dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 83160a9c4c77..f91cdf7f6dae 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -192,6 +192,8 @@

 #define NR_SG 128

+#define MMCI_PINCTRL_STATE_OPENDRAIN "opendrain"
+
 struct clk;
 struct variant_data;
 struct dma_chan;
@@ -227,6 +229,9 @@ struct mmci_host {
         bool vqmmc_enabled;
         struct mmci_platform_data *plat;
         struct variant_data *variant;
+        struct pinctrl *pinctrl;
+        struct pinctrl_state *pins_default;
+        struct pinctrl_state *pins_opendrain;

         u8 hw_designer;
         u8 hw_revision:4;
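
Read together with the probe-time lookups above, the new set_ios behaviour
can be summarised by the sketch below (the helper itself is hypothetical,
not driver code). The two states are taken from the board's pinctrl
description, using PINCTRL_STATE_DEFAULT and the new "opendrain" state name:

/* Hypothetical helper: pick the pad state when the variant has no
 * MCI_OD/MCI_ROD bit in MMCIPOWER. */
static int mmci_set_pad_state(struct mmci_host *host, bool opendrain)
{
        return pinctrl_select_state(host->pinctrl,
                                    opendrain ? host->pins_opendrain
                                              : host->pins_default);
}
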
From 2a9d6c8060894ce06855b09d62be64110e48f27e Mon Sep 17 00:00:00 2001
From: Patrice Chotard
Date: Thu, 18 Jan 2018 15:34:21 +0100
Subject: [PATCH 92/96] mmc: mmci: Add STM32 variant

STM32F4 and STM32F7 MCUs have an SDIO controller that looks like an
ARM PL180. This patch adds the STM32 variant so that the mmci driver
supports it.

Signed-off-by: Andrea Merello
Signed-off-by: Patrice Chotard
Reviewed-by: Linus Walleij
Signed-off-by: Ulf Hansson
---
 drivers/mmc/host/mmci.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index f8a21f3c3b51..6246eaada750 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -244,6 +244,23 @@ static struct variant_data variant_ux500v2 = {
         .opendrain = MCI_OD,
 };

+static struct variant_data variant_stm32 = {
+        .fifosize = 32 * 4,
+        .fifohalfsize = 8 * 4,
+        .clkreg = MCI_CLK_ENABLE,
+        .clkreg_enable = MCI_ST_UX500_HWFCEN,
+        .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+        .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+        .datalength_bits = 24,
+        .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
+        .st_sdio = true,
+        .st_clkdiv = true,
+        .pwrreg_powerup = MCI_PWR_ON,
+        .f_max = 48000000,
+        .pwrreg_clkgate = true,
+        .pwrreg_nopower = true,
+};
+
 static struct variant_data variant_qcom = {
         .fifosize = 16 * 4,
         .fifohalfsize = 8 * 4,
@@ -2021,6 +2038,11 @@ static const struct amba_id mmci_ids[] = {
                 .mask = 0xf0ffffff,
                 .data = &variant_ux500v2,
         },
+        {
+                .id = 0x00880180,
+                .mask = 0x00ffffff,
+                .data = &variant_stm32,
+        },
         /* Qualcomm variants */
         {
                 .id = 0x00051180,
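
For readers less familiar with AMBA device matching, this is roughly how the
new table entry is compared against a probed cell (the predicate is an
illustration, not driver code; only the id and mask values come from the
patch). The mask simply ignores the top byte of the peripheral ID:

/* Sketch: the amba core keeps only the bits set in .mask when comparing. */
static bool matches_stm32_sdio(u32 periphid)
{
        const u32 id = 0x00880180, mask = 0x00ffffff;

        return (periphid & mask) == (id & mask);
}
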
From 4a09d0b86bad0999a2bb0e2ee126a3c5246d1f51 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Fri, 19 Jan 2018 15:54:44 +0100
Subject: [PATCH 93/96] mmc: tmio: hide unused tmio_mmc_clk_disable/tmio_mmc_clk_enable functions

When CONFIG_PM is disabled, we get a warning about the clock handling
being unused:

drivers/mmc/host/tmio_mmc_core.c:937:13: error: 'tmio_mmc_clk_disable' defined but not used [-Werror=unused-function]
 static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
             ^~~~~~~~~~~~~~~~~~~~
drivers/mmc/host/tmio_mmc_core.c:929:12: error: 'tmio_mmc_clk_enable' defined but not used [-Werror=unused-function]
 static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
            ^~~~~~~~~~~~~~~~~~~

As the clock handling is now done elsewhere, this is only used when power
management is enabled. We could mark the functions as __maybe_unused, but
since there is already an #ifdef section, it seems easier to move the
helpers closer to their callers.

Fixes: b21fc294387e ("mmc: tmio: move clk_enable/disable out of tmio_mmc_host_probe()")
Signed-off-by: Arnd Bergmann
Reviewed-by: Masahiro Yamada
Signed-off-by: Ulf Hansson
---
 drivers/mmc/host/tmio_mmc_core.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 6d8719be75a8..33494241245a 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -926,20 +926,6 @@ static void tmio_mmc_done_work(struct work_struct *work)
         tmio_mmc_finish_request(host);
 }

-static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
-{
-        if (!host->clk_enable)
-                return -ENOTSUPP;
-
-        return host->clk_enable(host);
-}
-
-static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
-{
-        if (host->clk_disable)
-                host->clk_disable(host);
-}
-
 static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
 {
         struct mmc_host *mmc = host->mmc;
@@ -1337,6 +1323,20 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
 EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);

 #ifdef CONFIG_PM
+static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
+{
+        if (!host->clk_enable)
+                return -ENOTSUPP;
+
+        return host->clk_enable(host);
+}
+
+static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
+{
+        if (host->clk_disable)
+                host->clk_disable(host);
+}
+
 int tmio_mmc_host_runtime_suspend(struct device *dev)
 {
         struct tmio_mmc_host *host = dev_get_drvdata(dev);
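
The log above mentions the alternative that was not taken. Purely as a
sketch of that rejected option (shown for comparison only), the helpers
could instead have stayed where they were with an annotation:

/* Rejected alternative from the commit message: annotate rather than move. */
static int __maybe_unused tmio_mmc_clk_enable(struct tmio_mmc_host *host)
{
        if (!host->clk_enable)
                return -ENOTSUPP;

        return host->clk_enable(host);
}

static void __maybe_unused tmio_mmc_clk_disable(struct tmio_mmc_host *host)
{
        if (host->clk_disable)
                host->clk_disable(host);
}

Moving them under #ifdef CONFIG_PM instead avoids the annotation and keeps
them next to their only callers.
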
From 6478f4e12b7663cf5ab5303c06f99e9ec8c2b859 Mon Sep 17 00:00:00 2001
From: David Lechner
Date: Sun, 21 Jan 2018 14:09:36 -0600
Subject: [PATCH 94/96] mmc: davinci: don't use module_platform_driver_probe()

This changes module_platform_driver_probe() to module_platform_driver()
in the TI DaVinci MMC driver.

On device tree systems, we can get a -EPROBE_DEFER when using a pinmux
for the CD GPIO, which results in the driver never loading because
module_platform_driver_probe() prevents it from being re-probed.

So, we replace module_platform_driver_probe() with
module_platform_driver() and remove the __init attributes accordingly.

Signed-off-by: David Lechner
Signed-off-by: Ulf Hansson
---
 drivers/mmc/host/davinci_mmc.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 351330dfb954..c5309ccf502e 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -174,7 +174,7 @@ module_param(poll_loopcount, uint, S_IRUGO);
 MODULE_PARM_DESC(poll_loopcount,
                  "Maximum polling loop count. Default = 32");

-static unsigned __initdata use_dma = 1;
+static unsigned use_dma = 1;
 module_param(use_dma, uint, 0);
 MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");

@@ -496,8 +496,7 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
         return ret;
 }

-static void __init_or_module
-davinci_release_dma_channels(struct mmc_davinci_host *host)
+static void davinci_release_dma_channels(struct mmc_davinci_host *host)
 {
         if (!host->use_dma)
                 return;
@@ -506,7 +505,7 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
         dma_release_channel(host->dma_rx);
 }

-static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
+static int davinci_acquire_dma_channels(struct mmc_davinci_host *host)
 {
         host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
         if (IS_ERR(host->dma_tx)) {
@@ -1201,7 +1200,7 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
         return 0;
 }

-static int __init davinci_mmcsd_probe(struct platform_device *pdev)
+static int davinci_mmcsd_probe(struct platform_device *pdev)
 {
         const struct of_device_id *match;
         struct mmc_davinci_host *host = NULL;
@@ -1414,11 +1413,12 @@ static struct platform_driver davinci_mmcsd_driver = {
                 .pm = davinci_mmcsd_pm_ops,
                 .of_match_table = davinci_mmc_dt_ids,
         },
+        .probe = davinci_mmcsd_probe,
         .remove = __exit_p(davinci_mmcsd_remove),
         .id_table = davinci_mmc_devtype,
 };

-module_platform_driver_probe(davinci_mmcsd_driver, davinci_mmcsd_probe);
+module_platform_driver(davinci_mmcsd_driver);

 MODULE_AUTHOR("Texas Instruments India");
 MODULE_LICENSE("GPL");
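
To illustrate why one-shot probing is a problem here, consider the sketch
below. It is hypothetical (the helper and the exact gpiod call are
assumptions, not taken from the driver): any resource lookup during probe
may return -EPROBE_DEFER, and the core can only retry the probe later if
the driver stays registered with a .probe callback, which
module_platform_driver_probe() does not allow.

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

/* Hypothetical: a card-detect GPIO lookup that defers until the pinmux
 * driver providing it has probed. */
static int example_request_cd(struct platform_device *pdev)
{
        struct gpio_desc *cd;

        cd = devm_gpiod_get_optional(&pdev->dev, "cd", GPIOD_IN);
        if (IS_ERR(cd))
                return PTR_ERR(cd);     /* may be -EPROBE_DEFER */

        return 0;
}
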
From f9de65fc61b51398cafb2e4db5cc787704fc49a1 Mon Sep 17 00:00:00 2001
From: David Lechner
Date: Sun, 21 Jan 2018 14:28:13 -0600
Subject: [PATCH 95/96] mmc: davinci: suppress error message on EPROBE_DEFER

This suppresses printing an error message during probe of the TI DaVinci
MMC driver when the error is EPROBE_DEFER.

Signed-off-by: David Lechner
Signed-off-by: Ulf Hansson
---
 drivers/mmc/host/davinci_mmc.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index c5309ccf502e..8e363174f9d6 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1253,8 +1253,9 @@ static int davinci_mmcsd_probe(struct platform_device *pdev)
                 pdev->id_entry = match->data;
                 ret = mmc_of_parse(mmc);
                 if (ret) {
-                        dev_err(&pdev->dev,
-                                "could not parse of data: %d\n", ret);
+                        if (ret != -EPROBE_DEFER)
+                                dev_err(&pdev->dev,
+                                        "could not parse of data: %d\n", ret);
                         goto parse_fail;
                 }
         } else {

From 310eb252a78307fc2ac4c4c755290a578c0304d0 Mon Sep 17 00:00:00 2001
From: Wei Yongjun
Date: Tue, 23 Jan 2018 02:09:13 +0000
Subject: [PATCH 96/96] mmc: mmci: fix error return code in mmci_probe()

Fix to return a negative error code from the error handling case instead
of 0, as done elsewhere in this function.

Fixes: f9bb304ce855 ("mmc: mmci: Add support for setting pad type via pinctrl")
Signed-off-by: Wei Yongjun
Reviewed-by: Patrice Chotard
Signed-off-by: Ulf Hansson
---
 drivers/mmc/host/mmci.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 6246eaada750..70b0df8b9c78 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1646,6 +1646,7 @@ static int mmci_probe(struct amba_device *dev,
                 host->pinctrl = devm_pinctrl_get(&dev->dev);
                 if (IS_ERR(host->pinctrl)) {
                         dev_err(&dev->dev, "failed to get pinctrl");
+                        ret = PTR_ERR(host->pinctrl);
                         goto host_free;
                 }

@@ -1653,6 +1654,7 @@ static int mmci_probe(struct amba_device *dev,
                                                           PINCTRL_STATE_DEFAULT);
                 if (IS_ERR(host->pins_default)) {
                         dev_err(mmc_dev(mmc), "Can't select default pins\n");
+                        ret = PTR_ERR(host->pins_default);
                         goto host_free;
                 }

@@ -1660,6 +1662,7 @@ static int mmci_probe(struct amba_device *dev,
                                                             MMCI_PINCTRL_STATE_OPENDRAIN);
                 if (IS_ERR(host->pins_opendrain)) {
                         dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
+                        ret = PTR_ERR(host->pins_opendrain);
                         goto host_free;
                 }
         }
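
As a closing illustration of the pattern this last fix enforces (the
wrapper below is hypothetical, not driver code): every IS_ERR() branch that
jumps to an unwind label must first turn the error pointer into the
negative value the function will return, otherwise a stale 0 leaks out.

#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

/* Hypothetical wrapper: look up a named pinctrl state, returning 0 or the
 * real negative error code. */
static int example_lookup_state(struct pinctrl *p, const char *name,
                                struct pinctrl_state **state)
{
        *state = pinctrl_lookup_state(p, name);
        if (IS_ERR(*state))
                return PTR_ERR(*state);

        return 0;
}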