mmc: core: Use delayed work in clock gating framework
The current clock gating framework disables the MCI clock as soon as a request completes and re-enables it when the next request arrives. When enabled, this aggressive gating causes the following problems:

When back-to-back requests come from the queue layer, the clocks end up being disabled and re-enabled between requests, because 8 MCLK clock cycles is a very short duration compared with the delay between back-to-back requests reaching the MMC layer. This overhead can affect overall performance, depending on how long the clock enable and disable calls take, which is platform dependent. For example, on some platforms the clock control is not on the local processor but on a different subsystem, and the enable/disable calls add significant overhead. Also, if the host controller driver decides to disable the host clock as well when mmc_set_ios() is called with ios.clock = 0, that adds further delay, and it is quite likely that the next request has already arrived and is needlessly blocked waiting for the clocks to be re-enabled. This is seen frequently when the processor is running at high speed and on multi-core platforms, and it reduces overall throughput compared with clock gating being disabled.

Fix this by delaying the clock shut-off: post the gating request on a delayed workqueue, and cancel any pending, not-yet-run gating work when the card is accessed again. A sysfs entry is provided to tune the delay as needed; the default value is 200 ms.

Signed-off-by: Sujit Reddy Thumma <sthumma@codeaurora.org>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
commit 597dd9d79c
parent c59d44739a
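To make the mechanism concrete, below is a minimal, self-contained sketch of the delayed-gating pattern the patch introduces. It is not the MMC core code: the demo_host structure and demo_* names are hypothetical, it uses system_wq where the patch uses system_nrt_wq, and the gate/ungate steps are reduced to a flag where the real code programs the controller clock.

/* Hypothetical sketch (not the MMC core): gate the "clock" only after
 * clkgate_delay ms of idleness, and cancel any pending gate work when a
 * new request arrives. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>

struct demo_host {
	struct delayed_work	gate_work;
	spinlock_t		lock;
	unsigned long		clkgate_delay;	/* ms, tunable */
	int			requests;
	bool			gated;
};

static struct demo_host demo;

static void demo_gate_work(struct work_struct *work)
{
	struct demo_host *h = container_of(work, struct demo_host,
					   gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);
	if (!h->requests)
		h->gated = true;	/* real code would set ios.clock = 0 */
	spin_unlock_irqrestore(&h->lock, flags);
}

/* Request arrives: ungate immediately and drop any pending gate work. */
static void demo_clk_hold(struct demo_host *h)
{
	unsigned long flags;

	cancel_delayed_work_sync(&h->gate_work);
	spin_lock_irqsave(&h->lock, flags);
	h->requests++;
	h->gated = false;	/* real code would restore ios.clock */
	spin_unlock_irqrestore(&h->lock, flags);
}

/* Request completes: arm the gate instead of gating immediately. */
static void demo_clk_release(struct demo_host *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);
	if (!--h->requests)
		queue_delayed_work(system_wq, &h->gate_work,
				   msecs_to_jiffies(h->clkgate_delay));
	spin_unlock_irqrestore(&h->lock, flags);
}

static int __init demo_init(void)
{
	spin_lock_init(&demo.lock);
	demo.clkgate_delay = 200;	/* same default as the patch */
	INIT_DELAYED_WORK(&demo.gate_work, demo_gate_work);

	/* simulate one request: ungate, then arm the delayed gate */
	demo_clk_hold(&demo);
	demo_clk_release(&demo);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo.gate_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The point of the sketch is the ordering: the gate is armed only when the last request completes, and an armed-but-not-yet-run gate is cancelled synchronously before the next request touches the card.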
diff --git a/Documentation/mmc/mmc-dev-attrs.txt b/Documentation/mmc/mmc-dev-attrs.txt
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -64,3 +64,13 @@ Note on Erase Size and Preferred Erase Size:
 	size specified by the card.
 
 	"preferred_erase_size" is in bytes.
+
+SD/MMC/SDIO Clock Gating Attribute
+==================================
+
+Read and write access is provided to following attribute.
+This attribute appears only if CONFIG_MMC_CLKGATE is enabled.
+
+	clkgate_delay	Tune the clock gating delay with desired value in milliseconds.
+
+echo <desired delay> > /sys/class/mmc_host/mmcX/clkgate_delay
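The echo shown above is the normal way to tune the delay. For completeness, a hedged userspace sketch in C that reads and then changes the same attribute; the mmc0 instance and the 150 ms value are examples only, the file exists only on kernels built with CONFIG_MMC_CLKGATE, and writing it requires appropriate privileges.

/* Hypothetical userspace helper: read and set clkgate_delay for one host. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/mmc_host/mmc0/clkgate_delay";
	char buf[64];
	FILE *f;

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("current: %s", buf);	/* e.g. "200 millisecs" */
	fclose(f);

	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "150\n");			/* new delay in milliseconds */
	fclose(f);
	return 0;
}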
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -54,6 +54,31 @@ static DEFINE_IDR(mmc_host_idr);
 static DEFINE_SPINLOCK(mmc_host_lock);
 
 #ifdef CONFIG_MMC_CLKGATE
+static ssize_t clkgate_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	return snprintf(buf, PAGE_SIZE, "%lu millisecs\n",
+			host->clkgate_delay);
+}
+
+static ssize_t clkgate_delay_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	unsigned long flags, value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clkgate_delay = value;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+
+	pr_info("%s: clock gate delay set to %lu ms\n",
+			mmc_hostname(host), value);
+	return count;
+}
 
 /*
  * Enabling clock gating will make the core call out to the host
@@ -114,7 +139,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
 static void mmc_host_clk_gate_work(struct work_struct *work)
 {
 	struct mmc_host *host = container_of(work, struct mmc_host,
-					clk_gate_work);
+					clk_gate_work.work);
 
 	mmc_host_clk_gate_delayed(host);
 }
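The callback still receives a plain work_struct pointer, which is why container_of() now has to name the work member embedded in the delayed_work. A sketch of an equivalent formulation using the to_delayed_work() helper from <linux/workqueue.h>; this is not what the patch applies, just the same conversion spelled out in two steps.

/* Equivalent formulation (sketch): recover the delayed_work from the
 * embedded work_struct, then the host from the delayed_work. */
static void mmc_host_clk_gate_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mmc_host *host = container_of(dwork, struct mmc_host,
					     clk_gate_work);

	mmc_host_clk_gate_delayed(host);
}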
@@ -131,6 +156,8 @@ void mmc_host_clk_hold(struct mmc_host *host)
 {
 	unsigned long flags;
 
+	/* cancel any clock gating work scheduled by mmc_host_clk_release() */
+	cancel_delayed_work_sync(&host->clk_gate_work);
 	mutex_lock(&host->clk_gate_mutex);
 	spin_lock_irqsave(&host->clk_lock, flags);
 	if (host->clk_gated) {
@@ -180,7 +207,8 @@ void mmc_host_clk_release(struct mmc_host *host)
 	host->clk_requests--;
 	if (mmc_host_may_gate_card(host->card) &&
 		!host->clk_requests)
-		queue_work(system_nrt_wq, &host->clk_gate_work);
+		queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
+				msecs_to_jiffies(host->clkgate_delay));
 	spin_unlock_irqrestore(&host->clk_lock, flags);
 }
 
@@ -213,8 +241,13 @@ static inline void mmc_host_clk_init(struct mmc_host *host)
 	host->clk_requests = 0;
 	/* Hold MCI clock for 8 cycles by default */
 	host->clk_delay = 8;
+	/*
+	 * Default clock gating delay is 200ms.
+	 * This value can be tuned by writing into sysfs entry.
+	 */
+	host->clkgate_delay = 200;
 	host->clk_gated = false;
-	INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
+	INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
 	spin_lock_init(&host->clk_lock);
 	mutex_init(&host->clk_gate_mutex);
 }
@@ -229,7 +262,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
 	 * Wait for any outstanding gate and then make sure we're
 	 * ungated before exiting.
 	 */
-	if (cancel_work_sync(&host->clk_gate_work))
+	if (cancel_delayed_work_sync(&host->clk_gate_work))
 		mmc_host_clk_gate_delayed(host);
 	if (host->clk_gated)
 		mmc_host_clk_hold(host);
@@ -237,6 +270,17 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
 	WARN_ON(host->clk_requests > 1);
 }
 
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+	host->clkgate_delay_attr.show = clkgate_delay_show;
+	host->clkgate_delay_attr.store = clkgate_delay_store;
+	sysfs_attr_init(&host->clkgate_delay_attr.attr);
+	host->clkgate_delay_attr.attr.name = "clkgate_delay";
+	host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
+		pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
+				mmc_hostname(host));
+}
 #else
 
 static inline void mmc_host_clk_init(struct mmc_host *host)
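Here the patch fills in a per-host struct device_attribute by hand before registering it. A hedged sketch of the more common static variant, using the DEVICE_ATTR() macro with the same show/store callbacks; this is not what the patch does, since the patch keeps the attribute embedded in struct mmc_host.

/* Sketch only: the conventional static-attribute form of the same sysfs
 * file. Static attributes do not need sysfs_attr_init(). */
static DEVICE_ATTR(clkgate_delay, S_IRUGO | S_IWUSR,
		   clkgate_delay_show, clkgate_delay_store);

static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
{
	if (device_create_file(&host->class_dev, &dev_attr_clkgate_delay))
		pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
		       mmc_hostname(host));
}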
@@ -247,6 +291,10 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
 {
 }
 
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+}
+
 #endif
 
 /**
@@ -335,6 +383,7 @@ int mmc_add_host(struct mmc_host *host)
 #ifdef CONFIG_DEBUG_FS
 	mmc_add_host_debugfs(host);
 #endif
+	mmc_host_clk_sysfs_init(host);
 
 	mmc_start_host(host);
 	register_pm_notifier(&host->pm_notify);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -253,10 +253,12 @@ struct mmc_host {
 	int			clk_requests;	/* internal reference counter */
 	unsigned int		clk_delay;	/* number of MCI clk hold cycles */
 	bool			clk_gated;	/* clock gated */
-	struct work_struct	clk_gate_work;	/* delayed clock gate */
+	struct delayed_work	clk_gate_work;	/* delayed clock gate */
 	unsigned int		clk_old;	/* old clock value cache */
 	spinlock_t		clk_lock;	/* lock for clk fields */
 	struct mutex		clk_gate_mutex;	/* mutex for clock gating */
+	struct device_attribute clkgate_delay_attr;
+	unsigned long		clkgate_delay;
 #endif
 
 	/* host specific block data */