/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include "governor.h"

static struct class *devfreq_class;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);

/**
 * find_device_devfreq() - find devfreq struct using device pointer
 * @dev:	device pointer used to lookup device devfreq.
 *
 * Search the list of device devfreqs and return the matched device's
 * devfreq info. devfreq_list_lock should be held by the caller.
 */
static struct devfreq *find_device_devfreq(struct device *dev)
{
	struct devfreq *tmp_devfreq;

	if (unlikely(IS_ERR_OR_NULL(dev))) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
		if (tmp_devfreq->dev.parent == dev)
			return tmp_devfreq;
	}

	return ERR_PTR(-ENODEV);
}

/**
 * devfreq_get_freq_level() - Lookup freq_table for the frequency
 * @devfreq:	the devfreq instance
 * @freq:	the target frequency
 */
static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
	int lev;

	for (lev = 0; lev < devfreq->profile->max_state; lev++)
		if (freq == devfreq->profile->freq_table[lev])
			return lev;

	return -EINVAL;
}

/**
 * devfreq_update_status() - Update statistics of devfreq behavior
 * @devfreq:	the devfreq instance
 * @freq:	the update target frequency
 */
static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev;
	unsigned long cur_time;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0)
		return lev;

	cur_time = jiffies;
	devfreq->time_in_state[lev] +=
			 cur_time - devfreq->last_stat_updated;
	if (freq != devfreq->previous_freq) {
		prev_lev = devfreq_get_freq_level(devfreq,
						devfreq->previous_freq);
		devfreq->trans_table[(prev_lev *
				devfreq->profile->max_state) + lev]++;
		devfreq->total_trans++;
	}
	devfreq->last_stat_updated = cur_time;

	return 0;
}
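
/*
 * Illustrative note (not from the original source): trans_table is a
 * flattened max_state x max_state matrix indexed as [from * max_state + to].
 * For a hypothetical profile with max_state = 3, a transition from level 2
 * down to level 0 performed by devfreq_update_status() amounts to:
 *
 *	devfreq->trans_table[2 * 3 + 0]++;	// i.e. trans_table[6]
 *	devfreq->total_trans++;
 *
 * while time_in_state[] accumulates the jiffies spent at the level that was
 * current since last_stat_updated.
 */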

/* Load monitoring helper functions for governors use */

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq:	the devfreq instance.
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 * This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	unsigned long freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	/*
	 * Adjust the frequency with user freq and QoS.
	 *
	 * List from the highest priority
	 * max_freq (probably called by thermal when it's too hot)
	 * min_freq
	 */

	if (devfreq->min_freq && freq < devfreq->min_freq) {
		freq = devfreq->min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (devfreq->max_freq && freq > devfreq->max_freq) {
		freq = devfreq->max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
	if (err)
		return err;

	if (devfreq->profile->freq_table)
		if (devfreq_update_status(devfreq, freq))
			dev_err(&devfreq->dev,
				"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = freq;
	return err;
}
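
/*
 * Usage sketch for governor code (an assumption-level example, not part of
 * the original file): update_devfreq() must be called with devfreq->lock
 * held, exactly as the core itself does in devfreq_monitor() and
 * devfreq_notifier_call():
 *
 *	mutex_lock(&devfreq->lock);
 *	err = update_devfreq(devfreq);
 *	mutex_unlock(&devfreq->lock);
 *	if (err)
 *		dev_err(&devfreq->dev, "dvfs failed (%d)\n", err);
 */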

/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work:	the work struct used to run devfreq_monitor periodically.
 *
 */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);
}

/**
 * devfreq_monitor_start() - Start load monitoring of devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function for starting devfreq device load monitoring. By
 * default delayed work based monitoring is supported. Function
 * to be called from governor in response to DEVFREQ_GOV_START
 * event when device is added to devfreq framework.
 */
void devfreq_monitor_start(struct devfreq *devfreq)
{
	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
	if (devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
}

/**
 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to stop devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_STOP
 * event when device is removed from devfreq framework.
 */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	cancel_delayed_work_sync(&devfreq->work);
}

/**
 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to suspend devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 * event or when polling interval is set to zero.
 *
 * Note: Though this function is the same as devfreq_monitor_stop(),
 * it is intentionally kept separate to provide hooks for collecting
 * transition statistics.
 */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);
	cancel_delayed_work_sync(&devfreq->work);
}

/**
 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to resume devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_RESUME
 * event or when polling interval is set to non-zero.
 */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling)
		goto out;

	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
	devfreq->stop_polling = false;

out:
	mutex_unlock(&devfreq->lock);
}

/**
 * devfreq_interval_update() - Update device devfreq monitoring interval
 * @devfreq:	the devfreq instance.
 * @delay:	new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	if (devfreq->stop_polling)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
			      msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
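
/*
 * Hedged sketch of how a polling governor is expected to wire the helpers
 * above into its event_handler(); the function name is hypothetical, but the
 * events mirror what the devfreq core sends:
 *
 *	static int sample_governor_event_handler(struct devfreq *devfreq,
 *						 unsigned int event, void *data)
 *	{
 *		switch (event) {
 *		case DEVFREQ_GOV_START:
 *			devfreq_monitor_start(devfreq);
 *			break;
 *		case DEVFREQ_GOV_STOP:
 *			devfreq_monitor_stop(devfreq);
 *			break;
 *		case DEVFREQ_GOV_INTERVAL:
 *			devfreq_interval_update(devfreq, (unsigned int *)data);
 *			break;
 *		case DEVFREQ_GOV_SUSPEND:
 *			devfreq_monitor_suspend(devfreq);
 *			break;
 *		case DEVFREQ_GOV_RESUME:
 *			devfreq_monitor_resume(devfreq);
 *			break;
 *		}
 *		return 0;
 *	}
 */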

/**
 * devfreq_notifier_call() - Notify that the device frequency requirements
 *			     have been changed outside of the devfreq framework.
 * @nb:		the notifier_block (supposed to be devfreq->nb)
 * @type:	not used
 * @devp:	not used
 *
 * Called by a notifier that uses devfreq->nb.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int ret;

	mutex_lock(&devfreq->lock);
	ret = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return ret;
}

/**
 * _remove_devfreq() - Remove devfreq from the list and release its resources.
 * @devfreq:	the devfreq struct
 * @skip:	skip calling device_unregister().
 */
static void _remove_devfreq(struct devfreq *devfreq, bool skip)
{
	mutex_lock(&devfreq_list_lock);
	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
		mutex_unlock(&devfreq_list_lock);
		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
		return;
	}
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL);

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	if (!skip && get_device(&devfreq->dev)) {
		device_unregister(&devfreq->dev);
		put_device(&devfreq->dev);
	}

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}

/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:	the devfreq device
 *
 * This calls _remove_devfreq() if it has not been called already.
 * Note that devfreq_dev_release() could be called by _remove_devfreq() as
 * well as by others unregistering the device.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);

	_remove_devfreq(devfreq, true);
}

/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor:	the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const struct devfreq_governor *governor,
				   void *data)
{
	struct devfreq *devfreq;
	int err = 0;

	if (!dev || !profile || !governor) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		dev_err(dev, "%s: Unable to create devfreq for the device\n",
			__func__);
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	devfreq->profile = profile;
	devfreq->governor = governor;
	devfreq->previous_freq = profile->initial_freq;
	devfreq->data = data;
	devfreq->nb.notifier_call = devfreq_notifier_call;

	devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
						devfreq->profile->max_state *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->last_stat_updated = jiffies;

	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
	err = device_register(&devfreq->dev);
	if (err) {
		put_device(&devfreq->dev);
		mutex_unlock(&devfreq->lock);
		goto err_dev;
	}

	mutex_unlock(&devfreq->lock);

	mutex_lock(&devfreq_list_lock);
	list_add(&devfreq->node, &devfreq_list);
	mutex_unlock(&devfreq_list_lock);

	err = devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_START, NULL);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n",
			__func__);
		goto err_init;
	}

	return devfreq;

err_init:
	list_del(&devfreq->node);
	device_unregister(&devfreq->dev);
err_dev:
	kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);
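
/*
 * Registration sketch for a driver (illustrative only; the profile values,
 * callback names and the choice of devfreq_simple_ondemand are assumptions,
 * not taken from this file):
 *
 *	static struct devfreq_dev_profile sample_profile = {
 *		.initial_freq	= 200000000,
 *		.polling_ms	= 100,
 *		.target		= sample_target,	 // driver-provided
 *		.get_dev_status	= sample_get_dev_status, // driver-provided
 *	};
 *
 *	devfreq = devfreq_add_device(dev, &sample_profile,
 *				     &devfreq_simple_ondemand, NULL);
 *	if (IS_ERR(devfreq))
 *		return PTR_ERR(devfreq);
 */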

/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq:	the devfreq instance to be removed
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	_remove_devfreq(devfreq, false);

	return 0;
}
EXPORT_SYMBOL(devfreq_remove_device);

/**
 * devfreq_suspend_device() - Suspend devfreq of a device.
 * @devfreq:	the devfreq instance to be suspended
 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_SUSPEND, NULL);
}
EXPORT_SYMBOL(devfreq_suspend_device);

/**
 * devfreq_resume_device() - Resume devfreq of a device.
 * @devfreq:	the devfreq instance to be resumed
 */
int devfreq_resume_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_RESUME, NULL);
}
EXPORT_SYMBOL(devfreq_resume_device);

static ssize_t show_governor(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}

static ssize_t show_freq(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	unsigned long freq;
	struct devfreq *devfreq = to_devfreq(dev);

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
			return sprintf(buf, "%lu\n", freq);

	return sprintf(buf, "%lu\n", devfreq->previous_freq);
}

static ssize_t show_target_freq(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}

static ssize_t show_polling_interval(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}

static ssize_t store_polling_interval(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;
	int ret;

	ret = sscanf(buf, "%u", &value);
	if (ret != 1)
		return -EINVAL;

	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
	ret = count;

	return ret;
}

static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long max;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	max = df->max_freq;
	if (value && max && value > max) {
		ret = -EINVAL;
		goto unlock;
	}

	df->min_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}

static ssize_t show_min_freq(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq);
}

static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long min;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	min = df->min_freq;
	if (value && min && value < min) {
		ret = -EINVAL;
		goto unlock;
	}

	df->max_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}

static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
}

static ssize_t show_available_freqs(struct device *d,
				    struct device_attribute *attr,
				    char *buf)
{
	struct devfreq *df = to_devfreq(d);
	struct device *dev = df->dev.parent;
	struct opp *opp;
	ssize_t count = 0;
	unsigned long freq = 0;

	rcu_read_lock();
	do {
		opp = opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%lu ", freq);
		freq++;
	} while (1);
	rcu_read_unlock();

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}

static ssize_t show_trans_table(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	ssize_t len;
	int i, j, err;
	unsigned int max_state = devfreq->profile->max_state;

	err = devfreq_update_status(devfreq, devfreq->previous_freq);
	if (err)
		return 0;

	len = sprintf(buf, " From : To\n");
	len += sprintf(buf + len, " :");
	for (i = 0; i < max_state; i++)
		len += sprintf(buf + len, "%8u",
				devfreq->profile->freq_table[i]);

	len += sprintf(buf + len, " time(ms)\n");

	for (i = 0; i < max_state; i++) {
		if (devfreq->profile->freq_table[i]
					== devfreq->previous_freq) {
			len += sprintf(buf + len, "*");
		} else {
			len += sprintf(buf + len, " ");
		}
		len += sprintf(buf + len, "%8u:",
				devfreq->profile->freq_table[i]);
		for (j = 0; j < max_state; j++)
			len += sprintf(buf + len, "%8u",
				devfreq->trans_table[(i * max_state) + j]);
		len += sprintf(buf + len, "%10u\n",
			jiffies_to_msecs(devfreq->time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n",
			devfreq->total_trans);
	return len;
}

static struct device_attribute devfreq_attrs[] = {
	__ATTR(governor, S_IRUGO, show_governor, NULL),
	__ATTR(cur_freq, S_IRUGO, show_freq, NULL),
	__ATTR(available_frequencies, S_IRUGO, show_available_freqs, NULL),
	__ATTR(target_freq, S_IRUGO, show_target_freq, NULL),
	__ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
	       store_polling_interval),
	__ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
	__ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
	__ATTR(trans_stat, S_IRUGO, show_trans_table, NULL),
	{ },
};

static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	/* create_freezable_workqueue() returns NULL on failure, not ERR_PTR */
	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_attrs = devfreq_attrs;

	return 0;
}
subsys_initcall(devfreq_init);

static void __exit devfreq_exit(void)
{
	class_destroy(devfreq_class);
	destroy_workqueue(devfreq_wq);
}
module_exit(devfreq_exit);

/*
 * The following are helper functions for devfreq user device drivers using
 * the OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get proper OPP for the
 *			       freq value given to target callback.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @freq:	The frequency given to target function
 * @flags:	Flags handed from devfreq framework.
 *
 */
struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
				    u32 flags)
{
	struct opp *opp;

	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
		/* The freq is an upper bound. opp should be lower */
		opp = opp_find_freq_floor(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ENODEV))
			opp = opp_find_freq_ceil(dev, freq);
	} else {
		/* The freq is a lower bound. opp should be higher */
		opp = opp_find_freq_ceil(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ENODEV))
			opp = opp_find_freq_floor(dev, freq);
	}

	return opp;
}
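
/*
 * Typical use from a driver's profile->target() callback (illustrative
 * sketch; sample_set_rate() is hypothetical, and the OPP lookup is done
 * under rcu_read_lock() as elsewhere in this file):
 *
 *	static int sample_target(struct device *dev, unsigned long *freq,
 *				 u32 flags)
 *	{
 *		struct opp *opp;
 *
 *		rcu_read_lock();
 *		opp = devfreq_recommended_opp(dev, freq, flags);
 *		if (IS_ERR(opp)) {
 *			rcu_read_unlock();
 *			return PTR_ERR(opp);
 *		}
 *		*freq = opp_get_freq(opp);
 *		rcu_read_unlock();
 *
 *		return sample_set_rate(dev, *freq);
 *	}
 */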

/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				     for any changes in the OPP availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh = opp_get_notifier(dev);

	if (IS_ERR(nh))
		return PTR_ERR(nh);
	return srcu_notifier_chain_register(nh, &devfreq->nb);
}

/**
 * devfreq_unregister_opp_notifier() - Helper function to stop devfreq from
 *				       getting notified of any changes in the
 *				       OPP availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * This must be called from the exit() callback of devfreq_dev_profile if
 * devfreq_recommended_opp() is used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh = opp_get_notifier(dev);

	if (IS_ERR(nh))
		return PTR_ERR(nh);
	return srcu_notifier_chain_unregister(nh, &devfreq->nb);
}
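
/*
 * Pairing sketch (hypothetical driver code): register the notifier once the
 * devfreq instance exists, e.g. after devfreq_add_device() in probe(), and
 * unregister it from the profile's exit() callback:
 *
 *	err = devfreq_register_opp_notifier(dev, devfreq);
 *	...
 *	static void sample_exit(struct device *dev)
 *	{
 *		devfreq_unregister_opp_notifier(dev, sample_devfreq);
 *	}
 */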

MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("devfreq class support");
MODULE_LICENSE("GPL");