author     Viresh Kumar <viresh.kumar@linaro.org>          2015-12-03 09:37:54 +0530
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2015-12-09 22:26:12 +0100
commit     f08f638b9c7f1bf3cb9006d3d26bf568d807ede0 (patch)
tree       5d2312f8f6f7f662332b53e26b9df77ca18547b1 /drivers/cpufreq/cpufreq_ondemand.c
parent     70f43e5e798c8818d97d8d6a9bd4cd3235af9686 (diff)
download   linux-f08f638b9c7f1bf3cb9006d3d26bf568d807ede0.tar.gz
cpufreq: ondemand: update update_sampling_rate() to make it more efficient
Currently update_sampling_rate() runs over each online CPU and cancels/queues timers on all policy->cpus every time. This should be done just once for any CPU belonging to a policy.

Create a cpumask and keep clearing it as and when we process policies, so that we don't have to traverse through all CPUs of the same policy.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
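For illustration only, here is a minimal userspace C sketch of the idea behind the patch: copy the online mask once, then clear every CPU of a policy as soon as that policy has been handled, so no policy is visited twice. The names (NR_CPUS_SIM, policy_of(), policy_mask()) are hypothetical stand-ins for the kernel's cpumask and cpufreq_policy objects and are not part of this patch.

/*
 * Sketch of the "clear processed policy members" pattern. CPUs 0-3 are
 * assumed to share policy 0 and CPUs 4-7 policy 1 (hypothetical layout).
 */
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS_SIM 8

static int policy_of(int cpu) { return cpu / 4; }

/* Mask of all CPUs belonging to the same policy as @cpu. */
static uint32_t policy_mask(int cpu)
{
        return 0xFu << (policy_of(cpu) * 4);
}

int main(void)
{
        uint32_t online = (1u << NR_CPUS_SIM) - 1;      /* cpu_online_mask  */
        uint32_t pending = online;                      /* cpumask_copy()   */
        int cpu;

        for (cpu = 0; cpu < NR_CPUS_SIM; cpu++) {
                if (!(pending & (1u << cpu)))           /* for_each_cpu()   */
                        continue;

                /* clear all CPUs of this policy: cpumask_andnot() */
                pending &= ~policy_mask(cpu);

                printf("update policy %d once (first cpu %d)\n",
                       policy_of(cpu), cpu);
        }
        return 0;
}

Run on the assumed 8-CPU/2-policy layout, this prints one line per policy rather than one per CPU, which is exactly the saving the patch aims for.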
Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c  12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index f879012cf849..eae51070c034 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -246,6 +246,7 @@ static void update_sampling_rate(struct dbs_data *dbs_data,
 		unsigned int new_rate)
 {
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+	struct cpumask cpumask;
 	int cpu;
 
 	od_tuners->sampling_rate = new_rate = max(new_rate,
@@ -256,7 +257,9 @@ static void update_sampling_rate(struct dbs_data *dbs_data,
 	 */
 	mutex_lock(&od_dbs_cdata.mutex);
 
-	for_each_online_cpu(cpu) {
+	cpumask_copy(&cpumask, cpu_online_mask);
+
+	for_each_cpu(cpu, &cpumask) {
 		struct cpufreq_policy *policy;
 		struct od_cpu_dbs_info_s *dbs_info;
 		struct cpu_dbs_info *cdbs;
@@ -276,6 +279,9 @@ static void update_sampling_rate(struct dbs_data *dbs_data,
 
 		policy = shared->policy;
 
+		/* clear all CPUs of this policy */
+		cpumask_andnot(&cpumask, &cpumask, policy->cpus);
+
 		/*
 		 * Update sampling rate for CPUs whose policy is governed by
 		 * dbs_data. In case of governor_per_policy, only a single
@@ -285,6 +291,10 @@ static void update_sampling_rate(struct dbs_data *dbs_data,
 		if (dbs_data != policy->governor_data)
 			continue;
 
+		/*
+		 * Checking this for any CPU should be fine, timers for all of
+		 * them are scheduled together.
+		 */
 		next_sampling = jiffies + usecs_to_jiffies(new_rate);
 		appointed_at = dbs_info->cdbs.timer.expires;