[CPUFREQ] Make ondemand sampling per CPU and remove the mutex usage in the sampling path.

Make ondemand sampling per CPU and remove the mutex usage in the sampling path.
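The heart of the change: each CPU gets its own work_struct inside its
cpu_dbs_info, the work function samples only the local CPU, and it re-arms
itself with queue_delayed_work_on(), so the hot sampling path no longer
takes dbs_mutex or lock_cpu_hotplug(). Below is a minimal standalone
sketch of that pattern, written against the 2006-era workqueue API this
patch uses (three-argument INIT_WORK, handlers taking void *); the
sample_* names and the rate constant are illustrative stand-ins, not the
governor's real symbols:

/*
 * Minimal sketch of the per-CPU self-rearming sampling pattern.
 * Uses the 2006-era workqueue API (three-argument INIT_WORK, handlers
 * taking void *).  sample_* and the 100 ms rate are illustrative.
 */
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/jiffies.h>
#include <linux/smp.h>

struct sample_info {
	struct work_struct work;	/* one delayed work item per CPU */
	unsigned int enable;
};

static DEFINE_PER_CPU(struct sample_info, sample_info);
static struct workqueue_struct *sample_wq;
static unsigned int sample_rate_us = 100000;	/* illustrative: 100 ms */

static void sample_fn(void *data)
{
	/* Runs on the CPU it was queued on, so per-CPU state is
	 * touched locally and needs no mutex in the hot path. */
	unsigned int cpu = smp_processor_id();
	struct sample_info *info = &per_cpu(sample_info, cpu);

	if (!info->enable)
		return;

	/* ... sample this CPU's load and pick a target frequency ... */

	/* Re-arm on the same CPU; no dbs_mutex, no lock_cpu_hotplug(). */
	queue_delayed_work_on(cpu, sample_wq, &info->work,
			      usecs_to_jiffies(sample_rate_us));
}

static void sample_start(unsigned int cpu)
{
	struct sample_info *info = &per_cpu(sample_info, cpu);

	info->enable = 1;
	INIT_WORK(&info->work, sample_fn, NULL);
	queue_delayed_work_on(cpu, sample_wq, &info->work,
			      usecs_to_jiffies(sample_rate_us));
}

static void sample_stop(unsigned int cpu)
{
	struct sample_info *info = &per_cpu(sample_info, cpu);

	info->enable = 0;
	cancel_rearming_delayed_workqueue(sample_wq, &info->work);
}

Serialization against governor start/stop and CPU hotplug thus moves out
of the sampling path and into the slow path, under the dbs_mutex held in
cpufreq_governor_dbs() below.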

Signed-off-by: Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
Author:    Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date:      2006-06-28 13:51:19 -07:00
Committer: Dave Jones <davej@redhat.com>
parent 7a6bc1cdd5
commit 2f8a835c70

--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c

@@ -64,6 +64,7 @@ struct cpu_dbs_info_s {
 	cputime64_t prev_cpu_idle;
 	cputime64_t prev_cpu_wall;
 	struct cpufreq_policy *cur_policy;
+	struct work_struct work;
 	unsigned int enable;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
@@ -81,7 +82,7 @@ static unsigned int dbs_enable;	/* number of CPUs using this policy */
 static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK	(dbs_work, do_dbs_timer, NULL);
-static struct workqueue_struct *dbs_workq;
+static struct workqueue_struct *kondemand_wq;

 struct dbs_tuners {
 	unsigned int sampling_rate;
@@ -233,17 +234,15 @@ static struct attribute_group dbs_attr_group = {
 /************************** sysfs end ************************/

-static void dbs_check_cpu(int cpu)
+static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
 	unsigned int idle_ticks, total_ticks;
 	unsigned int load;
-	struct cpu_dbs_info_s *this_dbs_info;
 	cputime64_t cur_jiffies;

 	struct cpufreq_policy *policy;
 	unsigned int j;

-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	if (!this_dbs_info->enable)
 		return;
@@ -314,35 +313,29 @@ static void dbs_check_cpu(int cpu)

 static void do_dbs_timer(void *data)
 {
-	int i;
+	unsigned int cpu = smp_processor_id();
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);

-	lock_cpu_hotplug();
-	mutex_lock(&dbs_mutex);
-	for_each_online_cpu(i)
-		dbs_check_cpu(i);
-	queue_delayed_work(dbs_workq, &dbs_work,
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-	mutex_unlock(&dbs_mutex);
-	unlock_cpu_hotplug();
+	dbs_check_cpu(dbs_info);
+	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
+			      usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 }

-static inline void dbs_timer_init(void)
+static inline void dbs_timer_init(unsigned int cpu)
 {
-	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
-	if (!dbs_workq)
-		dbs_workq = create_singlethread_workqueue("ondemand");
-	if (!dbs_workq) {
-		printk(KERN_ERR "ondemand: Cannot initialize kernel thread\n");
-		return;
-	}
-	queue_delayed_work(dbs_workq, &dbs_work,
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+
+	INIT_WORK(&dbs_info->work, do_dbs_timer, 0);
+	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
+			      usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	return;
 }

-static inline void dbs_timer_exit(void)
+static inline void dbs_timer_exit(unsigned int cpu)
 {
-	if (dbs_workq)
-		cancel_rearming_delayed_workqueue(dbs_workq, &dbs_work);
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+
+	cancel_rearming_delayed_workqueue(kondemand_wq, &dbs_info->work);
 }

 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -370,6 +363,16 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			break;

 		mutex_lock(&dbs_mutex);
+		dbs_enable++;
+		if (dbs_enable == 1) {
+			kondemand_wq = create_workqueue("kondemand");
+			if (!kondemand_wq) {
+				printk(KERN_ERR "Creation of kondemand failed\n");
+				dbs_enable--;
+				mutex_unlock(&dbs_mutex);
+				return -ENOSPC;
+			}
+		}
 		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -380,7 +383,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		}
 		this_dbs_info->enable = 1;
 		sysfs_create_group(&policy->kobj, &dbs_attr_group);
-		dbs_enable++;
 		/*
 		 * Start the timerschedule work, when this governor
 		 * is used for first time
@@ -399,23 +401,20 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				def_sampling_rate = MIN_STAT_SAMPLING_RATE;

 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
-			dbs_timer_init();
 		}
+		dbs_timer_init(policy->cpu);

 		mutex_unlock(&dbs_mutex);
 		break;

 	case CPUFREQ_GOV_STOP:
 		mutex_lock(&dbs_mutex);
+		dbs_timer_exit(policy->cpu);
 		this_dbs_info->enable = 0;
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
-		/*
-		 * Stop the timerschedule work, when this governor
-		 * is used for first time
-		 */
 		if (dbs_enable == 0)
-			dbs_timer_exit();
+			destroy_workqueue(kondemand_wq);

 		mutex_unlock(&dbs_mutex);
@@ -452,13 +451,6 @@ static int __init cpufreq_gov_dbs_init(void)

 static void __exit cpufreq_gov_dbs_exit(void)
 {
-	/* Make sure that the scheduled work is indeed not running.
-	   Assumes the timer has been cancelled first. */
-	if (dbs_workq) {
-		flush_workqueue(dbs_workq);
-		destroy_workqueue(dbs_workq);
-	}
 	cpufreq_unregister_governor(&cpufreq_gov_dbs);
 }