/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

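/* Per-CPU state used by all governors built on this common code. */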
static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

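/* Serializes updates of the governors' global tunables pointer, ->gdbs_data. */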
static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */
/**
 * store_sampling_rate - update sampling rate effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate might not be appropriate.  For example, suppose the
 * original sampling_rate was 1 second and the user requests a new rate of
 * 10 ms, because they need an immediate reaction from the ondemand governor
 * but are not sure whether a higher frequency will actually be required.
 * With a plain update, the governor could react to the new rate up to
 * 1 second late.  Thus, when the sampling rate is being reduced, the new
 * value must take effect immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
			    size_t count)
{
	struct policy_dbs_info *policy_dbs;
	unsigned int rate;
	int ret;

	ret = sscanf(buf, "%u", &rate);
	if (ret != 1)
		return -EINVAL;

	dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		mutex_lock(&policy_dbs->timer_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->timer_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		unsigned int j;

		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall,
								  dbs_data->io_is_busy);
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
	}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);

static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
{
	return container_of(kobj, struct dbs_data, kobj);
}

static inline struct governor_attr *to_gov_attr(struct attribute *attr)
{
	return container_of(attr, struct governor_attr, attr);
}

static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	if (gattr->show)
		ret = gattr->show(dbs_data, buf);

	return ret;
}

static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	mutex_lock(&dbs_data->mutex);

	if (dbs_data->usage_count && gattr->store)
		ret = gattr->store(dbs_data, buf, count);

	mutex_unlock(&dbs_data->mutex);

	return ret;
}

/*
 * Sysfs Ops for accessing governor attributes.
 *
 * All show/store invocations for governor-specific sysfs attributes go
 * through the show/store callbacks defined above, which in turn invoke the
 * attribute-specific callbacks.
 */
static const struct sysfs_ops governor_sysfs_ops = {
	.show	= governor_show,
	.store	= governor_store,
};

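/*
 * dbs_update - evaluate load on all CPUs of a policy.
 *
 * Compute, from the idle and wall time deltas since the previous sample, the
 * load (in percent) of each CPU in @policy and return the maximum of those
 * values for the governor to base its frequency decision on.
 */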
unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0;
	unsigned int sampling_rate, io_busy, j;

	/*
	 * Sometimes governors may use an additional multiplier to increase
	 * sample delays temporarily.  Apply that multiplier to sampling_rate
	 * so as to keep the wake-up-from-idle detection logic a bit
	 * conservative.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
	/*
	 * For the purpose of ondemand, waiting for disk IO is an indication
	 * that you're performance critical, and not that the system is
	 * actually idle, so do not add the iowait time to the CPU idle time
	 * then.
	 */
	io_busy = dbs_data->io_is_busy;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
		j_cdbs->prev_cpu_wall = cur_wall_time;

		if (cur_idle_time <= j_cdbs->prev_cpu_idle) {
			idle_time = 0;
		} else {
			idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
			j_cdbs->prev_cpu_idle = cur_idle_time;
		}

		if (ignore_nice) {
			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is.  This is undesirable for latency-sensitive
		 * bursty workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency.  (However, we shouldn't
		 * over-do this copy, lest we get stuck at a high load (high
		 * frequency) for too long, even when the current system load
		 * has actually dropped down.  So we perform the copy only
		 * once, upon the first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's utilization
		 * update handler would not have run during CPU-idle periods.
		 * Hence, an unusually large 'wall_time' (as compared to the
		 * sampling rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate
		 * it for both:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}
	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);

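/*
 * gov_set_update_util - start sampling for a policy.
 *
 * Set the initial sample delay and register the utilization update callback
 * for every CPU in the policy, after which the scheduler starts invoking
 * dbs_update_util_handler() for those CPUs.
 */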
void gov_set_update_util(struct policy_dbs_info *policy_dbs,
			 unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

		cpufreq_set_update_util_data(cpu, &cdbs->update_util);
	}
}
EXPORT_SYMBOL_GPL(gov_set_update_util);

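/*
 * gov_clear_update_util - stop utilization updates for a policy.
 *
 * Unregister the utilization update callback for every CPU in the policy
 * and wait for in-flight invocations of it to complete.
 */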
static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_set_update_util_data(i, NULL);

	synchronize_rcu();
}

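/*
 * gov_cancel_work - stop all sampling activity for a policy.
 *
 * Detach the utilization update callbacks, flush the irq_work and the work
 * item it queues, and reset the work bookkeeping so the governor can be
 * restarted cleanly.
 */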
static void gov_cancel_work(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}

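/*
 * dbs_work_handler - process-context part of taking a sample.
 *
 * Invoke the governor-specific ->gov_dbs_timer() callback, which evaluates
 * the load and may change the frequency, and store the delay it returns
 * until the next sample.
 */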
static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->timer_mutex);
	gov_update_sample_delay(policy_dbs, gov->gov_dbs_timer(policy));
	mutex_unlock(&policy_dbs->timer_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a
	 * stale sample delay value.
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}

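/* Bounce out of (scheduler) interrupt context into process context. */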
static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work(&policy_dbs->work);
}

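/*
 * dbs_update_util_handler - utilization update callback.
 *
 * Invoked by the scheduler on every utilization update of a CPU, so it must
 * stay cheap: it only checks whether the sample delay has elapsed and, if
 * so, queues the irq_work that leads to dbs_work_handler() taking a sample.
 */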
static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned long util, unsigned long max)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.
	 */
	smp_rmb();
	delta_ns = time - policy_dbs->last_sample_time;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right
	 * away at this point.  Otherwise, we need to ensure that only one of
	 * the CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared &&
	    !atomic_add_unless(&policy_dbs->work_count, 1, 1))
		return;

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}

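/*
 * alloc_policy_dbs_info - allocate and initialize per-policy governor data.
 *
 * Have the governor allocate its policy_dbs_info via ->alloc(), initialize
 * the embedded locking and work items, and point the cpu_dbs_info of every
 * related CPU at the new structure.
 */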
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for per-policy governor data. */
	policy_dbs = gov->alloc();
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->timer_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = policy_dbs;
		j_cdbs->update_util.func = dbs_update_util_handler;
	}
	return policy_dbs;
}

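/* Undo alloc_policy_dbs_info(). */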
static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
				 struct dbs_governor *gov)
{
	int j;

	mutex_destroy(&policy_dbs->timer_mutex);

	for_each_cpu(j, policy_dbs->policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	gov->free(policy_dbs);
}

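/*
 * cpufreq_governor_init - handle CPUFREQ_GOV_POLICY_INIT.
 *
 * Allocate per-policy data and attach it to a dbs_data tunables set.  With
 * governor-per-policy, each policy gets its own dbs_data; otherwise the
 * first policy creates it (along with its sysfs kobject) and subsequent
 * policies take an extra reference on it.
 */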
static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data;
	struct policy_dbs_info *policy_dbs;
	unsigned int latency;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	dbs_data = gov->gdbs_data;
	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		mutex_lock(&dbs_data->mutex);
		dbs_data->usage_count++;
		list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
		mutex_unlock(&dbs_data->mutex);
		goto out;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
	mutex_init(&dbs_data->mutex);

	ret = gov->init(dbs_data, !policy->governor->initialized);
	if (ret) {
		/* Don't leak the tunables on a governor ->init() failure. */
		kfree(dbs_data);
		goto free_policy_dbs_info;
	}

469 /* policy latency is in ns. Convert it to us first */
470 latency = policy->cpuinfo.transition_latency / 1000;
471 if (latency == 0)
472 latency = 1;
473
474 /* Bring kernel and HW constraints together */
475 dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
476 MIN_LATENCY_MULTIPLIER * latency);
Viresh Kumarff4b1782016-02-09 09:01:32 +0530477 dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
478 LATENCY_MULTIPLIER * latency);
Viresh Kumar714a2d92015-06-04 16:43:27 +0530479
Viresh Kumar8eec1022015-10-15 21:35:22 +0530480 if (!have_governor_per_policy())
Rafael J. Wysocki7bdad342016-02-07 16:05:07 +0100481 gov->gdbs_data = dbs_data;
Viresh Kumar714a2d92015-06-04 16:43:27 +0530482
Rafael J. Wysockibc505472016-02-07 16:24:26 +0100483 policy->governor_data = policy_dbs;
Viresh Kumare4b133c2016-01-25 22:33:46 +0530484
Viresh Kumarc54df072016-02-10 11:00:25 +0530485 policy_dbs->dbs_data = dbs_data;
486 dbs_data->usage_count = 1;
487 list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
488
Viresh Kumarc4435632016-02-09 09:01:33 +0530489 gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
490 ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
491 get_governor_parent_kobj(policy),
492 "%s", gov->gov.name);
Rafael J. Wysockifafd5e82016-02-08 23:57:22 +0100493 if (!ret)
Rafael J. Wysocki1112e9d2016-02-21 00:53:06 +0100494 goto out;
Viresh Kumar714a2d92015-06-04 16:43:27 +0530495
Rafael J. Wysockifafd5e82016-02-08 23:57:22 +0100496 /* Failure, so roll back. */
Viresh Kumarc4435632016-02-09 09:01:33 +0530497 pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);
Viresh Kumar714a2d92015-06-04 16:43:27 +0530498
Viresh Kumare4b133c2016-01-25 22:33:46 +0530499 policy->governor_data = NULL;
500
Viresh Kumar8eec1022015-10-15 21:35:22 +0530501 if (!have_governor_per_policy())
Rafael J. Wysocki7bdad342016-02-07 16:05:07 +0100502 gov->gdbs_data = NULL;
503 gov->exit(dbs_data, !policy->governor->initialized);
Rafael J. Wysockibc505472016-02-07 16:24:26 +0100504 kfree(dbs_data);
505
Rafael J. Wysockie40e7b22016-02-10 17:07:44 +0100506free_policy_dbs_info:
Rafael J. Wysocki8c8f77f2016-02-21 00:51:27 +0100507 free_policy_dbs_info(policy_dbs, gov);
Rafael J. Wysocki1112e9d2016-02-21 00:53:06 +0100508
509out:
510 mutex_unlock(&gov_dbs_data_mutex);
Viresh Kumar714a2d92015-06-04 16:43:27 +0530511 return ret;
512}
513
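/*
 * cpufreq_governor_exit - handle CPUFREQ_GOV_POLICY_EXIT.
 *
 * Detach the policy from its dbs_data and free the tunables (dropping their
 * sysfs kobject) when the last policy using them goes away.
 */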
static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	int count;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	mutex_lock(&dbs_data->mutex);
	list_del(&policy_dbs->list);
	count = --dbs_data->usage_count;
	mutex_unlock(&dbs_data->mutex);

	if (!count) {
		kobject_put(&dbs_data->kobj);

		policy->governor_data = NULL;

		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data, policy->governor->initialized == 1);
		mutex_destroy(&dbs_data->mutex);
		kfree(dbs_data);
	} else {
		policy->governor_data = NULL;
	}

	free_policy_dbs_info(policy_dbs, gov);

	mutex_unlock(&gov_dbs_data_mutex);
	return 0;
}

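/*
 * cpufreq_governor_start - handle CPUFREQ_GOV_START.
 *
 * Seed the per-CPU idle/wall time baselines used by dbs_update(), run the
 * governor-specific ->start() callback and begin sampling by registering
 * the utilization update callbacks.
 */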
static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j;
	unsigned int io_busy;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_load = 100 * prev_load / (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	gov->start(policy);

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}

static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
	gov_cancel_work(policy);
	return 0;
}

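/*
 * cpufreq_governor_limits - handle CPUFREQ_GOV_LIMITS.
 *
 * Clamp the current frequency to the updated policy limits and arrange for
 * a sample to be taken on the next utilization update.
 */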
static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	mutex_lock(&policy_dbs->timer_mutex);

	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);

	gov_update_sample_delay(policy_dbs, 0);

	mutex_unlock(&policy_dbs->timer_mutex);

	return 0;
}

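/*
 * cpufreq_governor_dbs - common ->governor callback.
 *
 * Entry point plugged into struct cpufreq_governor by the governors built
 * on this code (ondemand, conservative); dispatches each governor event to
 * the corresponding helper above.
 */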
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
	if (event == CPUFREQ_GOV_POLICY_INIT) {
		return cpufreq_governor_init(policy);
	} else if (policy->governor_data) {
		switch (event) {
		case CPUFREQ_GOV_POLICY_EXIT:
			return cpufreq_governor_exit(policy);
		case CPUFREQ_GOV_START:
			return cpufreq_governor_start(policy);
		case CPUFREQ_GOV_STOP:
			return cpufreq_governor_stop(policy);
		case CPUFREQ_GOV_LIMITS:
			return cpufreq_governor_limits(policy);
		}
	}
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);