/*
 *  linux/include/linux/clk.h
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *  Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __LINUX_CLK_H
#define __LINUX_CLK_H

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct device;
struct clk;
struct device_node;
struct of_phandle_args;

/**
 * DOC: clk notifier callback types
 *
 * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
 *     to indicate that the rate change will proceed.  Drivers must
 *     immediately terminate any operations that will be affected by the
 *     rate change.  Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
 *     NOTIFY_STOP or NOTIFY_BAD.
 *
 * ABORT_RATE_CHANGE - called if the rate change failed for some reason
 *     after PRE_RATE_CHANGE.  In this case, all registered notifiers on
 *     the clk will be called with ABORT_RATE_CHANGE.  Callbacks must
 *     always return NOTIFY_DONE or NOTIFY_OK.
 *
 * POST_RATE_CHANGE - called after the clk rate change has successfully
 *     completed.  Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
 */
#define PRE_RATE_CHANGE			BIT(0)
#define POST_RATE_CHANGE		BIT(1)
#define ABORT_RATE_CHANGE		BIT(2)

/**
 * struct clk_notifier - associate a clk with a notifier
 * @clk: struct clk * to associate the notifier with
 * @notifier_head: an srcu_notifier_head for this clk
 * @node: linked list pointers
 *
 * A list of struct clk_notifier is maintained by the notifier code.
 * An entry is created whenever code registers the first notifier on a
 * particular @clk.  Future notifiers on that @clk are added to the
 * @notifier_head.
 */
struct clk_notifier {
	struct clk			*clk;
	struct srcu_notifier_head	notifier_head;
	struct list_head		node;
};

/**
 * struct clk_notifier_data - rate data to pass to the notifier callback
 * @clk: struct clk * being changed
 * @old_rate: previous rate of this clk
 * @new_rate: new rate of this clk
 *
 * For a pre-notifier, old_rate is the clk's rate before this rate
 * change, and new_rate is what the rate will be in the future.  For a
 * post-notifier, old_rate and new_rate are both set to the clk's
 * current rate (this was done to optimize the implementation).
 */
struct clk_notifier_data {
	struct clk		*clk;
	unsigned long		old_rate;
	unsigned long		new_rate;
};

/**
 * struct clk_bulk_data - Data used for bulk clk operations.
 *
 * @id: clock consumer ID
 * @clk: struct clk * to store the associated clock
 *
 * The CLK APIs provide a series of clk_bulk_*() API calls as
 * a convenience to consumers which require multiple clks.  This
 * structure is used to manage data for these calls.
 */
struct clk_bulk_data {
	const char		*id;
	struct clk		*clk;
};

#ifdef CONFIG_COMMON_CLK

/**
 * clk_notifier_register - register a clock rate-change notifier callback
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * ProTip: debugging across notifier chains can be frustrating.  Make sure that
 * your notifier callback function prints a nice big warning in case of
 * failure.
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);

/**
 * clk_notifier_unregister - unregister a clock rate-change notifier callback
 * @clk: clock whose rate we are no longer interested in
 * @nb: notifier block which will be unregistered
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);

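/*
 * Example (illustrative sketch, not mandated by this header): a consumer
 * registering a rate-change notifier on a clock it already holds.  The
 * foo_* names are hypothetical; the callback reacts to the events defined
 * above and returns NOTIFY_OK.
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE)
 *			pr_info("rate %lu -> %lu\n",
 *				ndata->old_rate, ndata->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_clk_notify,
 *	};
 *
 *	ret = clk_notifier_register(clk, &foo_nb);
 *	// ...
 *	clk_notifier_unregister(clk, &foo_nb);
 */
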
/**
 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
 *		      for a clock source.
 * @clk: clock source
 *
 * This gets the clock source accuracy expressed in ppb.
 * A perfect clock returns 0.
 */
long clk_get_accuracy(struct clk *clk);

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified degrees.  Returns 0 on
 * success, a negative errno otherwise.
 */
int clk_set_phase(struct clk *clk, int degrees);

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * a negative errno.
 */
int clk_get_phase(struct clk *clk);

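/*
 * Example (illustrative sketch): shift a sampling clock by 90 degrees and
 * read the programmed value back.  The clk pointer and the chosen angle
 * are hypothetical.
 *
 *	ret = clk_set_phase(clk, 90);
 *	if (!ret)
 *		degrees = clk_get_phase(clk);	// expected to report 90
 */
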
/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Adjust the duty cycle of a clock signal by the specified ratio.  Returns 0 on
 * success, a negative errno otherwise.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);

/**
 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio multiplied by the scale provided, otherwise
 * returns a negative errno.
 */
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);

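/*
 * Example (illustrative sketch): request a 50% duty cycle (ratio 1/2) and
 * read it back as a percentage by passing a scale of 100.  The clk pointer
 * is hypothetical.
 *
 *	ret = clk_set_duty_cycle(clk, 1, 2);
 *	if (!ret)
 *		percent = clk_get_scaled_duty_cycle(clk, 100);	// ~50
 */
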
/**
 * clk_is_match - check if two clks point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node.  Put differently, returns true if @p and @q
 * share the same &struct clk_core object.
 *
 * Returns false otherwise.  Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q);

#else

static inline int clk_notifier_register(struct clk *clk,
					struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline int clk_notifier_unregister(struct clk *clk,
					  struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline long clk_get_accuracy(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline int clk_set_phase(struct clk *clk, int degrees)
{
	return -ENOTSUPP;
}

static inline int clk_get_phase(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
				     unsigned int den)
{
	return -ENOTSUPP;
}

static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk,
						     unsigned int scale)
{
	return 0;
}

static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
	return p == q;
}

#endif

/**
 * clk_prepare - prepare a clock source
 * @clk: clock source
 *
 * This prepares the clock source for use.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
int clk_prepare(struct clk *clk);
int __must_check clk_bulk_prepare(int num_clks,
				  const struct clk_bulk_data *clks);
#else
static inline int clk_prepare(struct clk *clk)
{
	might_sleep();
	return 0;
}

static inline int __must_check clk_bulk_prepare(int num_clks,
						const struct clk_bulk_data *clks)
{
	might_sleep();
	return 0;
}
#endif

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: clock source
 *
 * This undoes a previously prepared clock.  The caller must balance
 * the number of prepare and unprepare calls.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
void clk_unprepare(struct clk *clk);
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks);
#else
static inline void clk_unprepare(struct clk *clk)
{
	might_sleep();
}
static inline void clk_bulk_unprepare(int num_clks,
				      const struct clk_bulk_data *clks)
{
	might_sleep();
}
#endif

#ifdef CONFIG_HAVE_CLK
/**
 * clk_get - lookup and obtain a reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get should not be called from within interrupt context.
 */
struct clk *clk_get(struct device *dev, const char *id);

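/*
 * Example (illustrative sketch): the classic consumer pattern of looking up
 * a clock, enabling it for use and releasing it again.  The dev pointer,
 * the con_id "bus" and the error handling shown are hypothetical.
 *
 *	struct clk *clk;
 *	int ret;
 *
 *	clk = clk_get(dev, "bus");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_prepare_enable(clk);
 *	if (ret) {
 *		clk_put(clk);
 *		return ret;
 *	}
 *	// ... use the device ...
 *	clk_disable_unprepare(clk);
 *	clk_put(clk);
 */
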
/**
 * clk_bulk_get - lookup and obtain a number of references to clock producer.
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get several clk consumers in one
 * operation.  If any of the clks cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns 0 if all clocks specified in the clk_bulk_data table are obtained
 * successfully, or a negative errno otherwise.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get should not be called from within interrupt context.
 */
int __must_check clk_bulk_get(struct device *dev, int num_clks,
			      struct clk_bulk_data *clks);
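
/*
 * Example (illustrative sketch): acquire and enable a fixed set of clocks in
 * one go.  The consumer IDs "axi" and "ahb" are hypothetical.
 *
 *	static struct clk_bulk_data foo_clks[] = {
 *		{ .id = "axi" },
 *		{ .id = "ahb" },
 *	};
 *
 *	ret = clk_bulk_get(dev, ARRAY_SIZE(foo_clks), foo_clks);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_bulk_prepare_enable(ARRAY_SIZE(foo_clks), foo_clks);
 *	if (ret) {
 *		clk_bulk_put(ARRAY_SIZE(foo_clks), foo_clks);
 *		return ret;
 *	}
 *	// ...
 *	clk_bulk_disable_unprepare(ARRAY_SIZE(foo_clks), foo_clks);
 *	clk_bulk_put(ARRAY_SIZE(foo_clks), foo_clks);
 */
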
/**
 * clk_bulk_get_all - lookup and obtain all available references to clock
 *		      producer.
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get all clk consumers in one
 * operation.  If any of the clks cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get_all should not be called from within interrupt context.
 */
int __must_check clk_bulk_get_all(struct device *dev,
				  struct clk_bulk_data **clks);
/**
 * devm_clk_bulk_get - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Returns 0 on success, a negative errno on failure.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management; the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
				   struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get_all - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management; the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get_all(struct device *dev,
				       struct clk_bulk_data **clks);

/**
 * devm_clk_get - lookup and obtain a managed reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * devm_clk_get should not be called from within interrupt context.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_clk_get(struct device *dev, const char *id);

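/*
 * Example (illustrative sketch): a probe routine that takes a managed
 * reference so no explicit clk_put() is needed on the error and remove
 * paths.  The pdev pointer is hypothetical; passing a NULL con_id to match
 * the device's only clock is a common, but here assumed, choice.
 *
 *	clk = devm_clk_get(&pdev->dev, NULL);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_prepare_enable(clk);
 *	// the reference itself is dropped automatically on unbind
 */
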
/**
 * devm_get_clk_from_child - lookup and obtain a managed reference to a
 *			     clock producer from child node.
 * @dev: device for clock "consumer"
 * @np: pointer to clock consumer node
 * @con_id: clock consumer ID
 *
 * This function parses the clocks, and uses them to look up the
 * struct clk from the registered list of clock providers by using
 * @np and @con_id.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_get_clk_from_child(struct device *dev,
				    struct device_node *np, const char *con_id);
/**
 * clk_rate_exclusive_get - get exclusivity over the rate control of a
 *			    producer
 * @clk: clock source
 *
 * This function allows drivers to get exclusive control over the rate of a
 * provider.  It prevents any other consumer from executing, even indirectly,
 * an operation which could alter the rate of the provider or cause glitches.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * driver, the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Must not be called from within atomic context.
 *
 * Returns success (0) or negative errno.
 */
int clk_rate_exclusive_get(struct clk *clk);

/**
 * clk_rate_exclusive_put - release exclusivity over the rate control of a
 *			    producer
 * @clk: clock source
 *
 * This function allows drivers to release the exclusivity they previously
 * got from clk_rate_exclusive_get().
 *
 * The caller must balance the number of clk_rate_exclusive_get() and
 * clk_rate_exclusive_put() calls.
 *
 * Must not be called from within atomic context.
 */
void clk_rate_exclusive_put(struct clk *clk);

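/*
 * Example (illustrative sketch): pin the rate of a clock while a
 * rate-sensitive transfer runs, then release the claim.  The target rate
 * shown is hypothetical.
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_set_rate(clk, 148500000);
 *	// ... rate-sensitive work; no other consumer can change the rate ...
 *	clk_rate_exclusive_put(clk);
 */
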
/**
 * clk_enable - inform the system when the clock source should be running.
 * @clk: clock source
 *
 * If the clock cannot be enabled/disabled, this should return success.
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int clk_enable(struct clk *clk);

/**
 * clk_bulk_enable - inform the system when the set of clks should be running.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int __must_check clk_bulk_enable(int num_clks,
				 const struct clk_bulk_data *clks);

/**
 * clk_disable - inform the system when the clock source is no longer required.
 * @clk: clock source
 *
 * Inform the system that a clock source is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the clock source is shared between
 * multiple drivers, clk_enable() calls must be balanced by the
 * same number of clk_disable() calls for the clock source to be
 * disabled.
 */
void clk_disable(struct clk *clk);

/**
 * clk_bulk_disable - inform the system when the set of clks is no
 *		      longer required.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Inform the system that a set of clks is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the set of clks is shared between
 * multiple drivers, clk_bulk_enable() calls must be balanced by the
 * same number of clk_bulk_disable() calls for the clock source to be
 * disabled.
 */
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks);

/**
 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
 *		  This is only valid once the clock source has been enabled.
 * @clk: clock source
 */
unsigned long clk_get_rate(struct clk *clk);

/**
 * clk_put - "free" the clock source
 * @clk: clock source
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * clk_put should not be called from within interrupt context.
 */
void clk_put(struct clk *clk);

/**
 * clk_bulk_put - "free" the clock source
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put should not be called from within interrupt context.
 */
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);

/**
 * clk_bulk_put_all - "free" all the clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put_all should not be called from within interrupt context.
 */
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);

/**
 * devm_clk_put - "free" a managed clock source
 * @dev: device used to acquire the clock
 * @clk: clock source acquired with devm_clk_get()
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * devm_clk_put should not be called from within interrupt context.
 */
void devm_clk_put(struct device *dev, struct clk *clk);

/*
 * The remaining APIs are optional for machine class support.
 */

/**
 * clk_round_rate - adjust a rate to the exact rate a clock can provide
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This answers the question "if I were to pass @rate to clk_set_rate(),
 * what clock rate would I end up with?" without changing the hardware
 * in any way.  In other words:
 *
 *   rate = clk_round_rate(clk, r);
 *
 * and:
 *
 *   clk_set_rate(clk, r);
 *   rate = clk_get_rate(clk);
 *
 * are equivalent except the former does not modify the clock hardware
 * in any way.
 *
 * Returns rounded clock rate in Hz, or negative errno.
 */
long clk_round_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_rate - set the clock rate for a clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
 *			    clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This helper function allows drivers to atomically set the rate of a producer
 * and claim exclusivity over the rate control of the producer.
 *
 * It is essentially a combination of clk_set_rate() and
 * clk_rate_exclusive_get().  The caller must balance this call with a call to
 * clk_rate_exclusive_put().
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent);

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate);

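/*
 * Example (illustrative sketch): constrain a scalable clock to a 100-200 MHz
 * window before requesting a nominal rate inside it.  The exact frequencies
 * are hypothetical.
 *
 *	ret = clk_set_rate_range(clk, 100000000, 200000000);
 *	if (!ret)
 *		ret = clk_set_rate(clk, 150000000);
 */
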
/**
 * clk_set_parent - set the parent clock source for this clock
 * @clk: clock source
 * @parent: parent clock source
 *
 * Returns success (0) or negative errno.
 */
int clk_set_parent(struct clk *clk, struct clk *parent);

/**
 * clk_get_parent - get the parent clock source for this clock
 * @clk: clock source
 *
 * Returns struct clk corresponding to parent clock source, or
 * valid IS_ERR() condition containing errno.
 */
struct clk *clk_get_parent(struct clk *clk);

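/*
 * Example (illustrative sketch): re-parent a mux clock onto a PLL after
 * checking that the topology allows it.  The "mux" and "pll" consumer
 * handles are hypothetical and assumed to have been obtained with clk_get().
 *
 *	if (clk_has_parent(mux, pll))
 *		ret = clk_set_parent(mux, pll);
 */
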
/**
 * clk_get_sys - get a clock based upon the device name
 * @dev_id: device name
 * @con_id: connection ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev_id and @con_id to determine the clock consumer, and
 * thereby the clock producer.  In contrast to clk_get() this function
 * takes the device name instead of the device itself for identification.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get_sys should not be called from within interrupt context.
 */
struct clk *clk_get_sys(const char *dev_id, const char *con_id);

#else /* !CONFIG_HAVE_CLK */

static inline struct clk *clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_all(struct device *dev,
						struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline int __must_check devm_clk_bulk_get(struct device *dev,
						 int num_clks,
						 struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
						     struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_get_clk_from_child(struct device *dev,
				struct device_node *np, const char *con_id)
{
	return NULL;
}

static inline void clk_put(struct clk *clk) {}

static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}

static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}

static inline void devm_clk_put(struct device *dev, struct clk *clk) {}

static inline int clk_rate_exclusive_get(struct clk *clk)
{
	return 0;
}

static inline void clk_rate_exclusive_put(struct clk *clk) {}

static inline int clk_enable(struct clk *clk)
{
	return 0;
}

static inline int __must_check clk_bulk_enable(int num_clks,
					       const struct clk_bulk_data *clks)
{
	return 0;
}

static inline void clk_disable(struct clk *clk) {}

static inline void clk_bulk_disable(int num_clks,
				    const struct clk_bulk_data *clks) {}

static inline unsigned long clk_get_rate(struct clk *clk)
{
	return 0;
}

static inline int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	return true;
}

static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
	return 0;
}

static inline struct clk *clk_get_parent(struct clk *clk)
{
	return NULL;
}

static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	return NULL;
}
#endif

/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
static inline int clk_prepare_enable(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);
	if (ret)
		return ret;
	ret = clk_enable(clk);
	if (ret)
		clk_unprepare(clk);

	return ret;
}

/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
static inline void clk_disable_unprepare(struct clk *clk)
{
	clk_disable(clk);
	clk_unprepare(clk);
}

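/*
 * Example (illustrative sketch): a runtime-PM pair that gates a device
 * clock with the helpers above.  The foo_priv structure and its clk member
 * are hypothetical.
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		struct foo_priv *priv = dev_get_drvdata(dev);
 *
 *		return clk_prepare_enable(priv->clk);
 *	}
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo_priv *priv = dev_get_drvdata(dev);
 *
 *		clk_disable_unprepare(priv->clk);
 *		return 0;
 *	}
 */
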
static inline int __must_check clk_bulk_prepare_enable(int num_clks,
					struct clk_bulk_data *clks)
{
	int ret;

	ret = clk_bulk_prepare(num_clks, clks);
	if (ret)
		return ret;
	ret = clk_bulk_enable(num_clks, clks);
	if (ret)
		clk_bulk_unprepare(num_clks, clks);

	return ret;
}

static inline void clk_bulk_disable_unprepare(int num_clks,
					      struct clk_bulk_data *clks)
{
	clk_bulk_disable(num_clks, clks);
	clk_bulk_unprepare(num_clks, clks);
}

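/*
 * Example (illustrative sketch): grab every clock listed for the device and
 * gate the whole set with the bulk helpers above.  The variable names are
 * hypothetical.
 *
 *	struct clk_bulk_data *clks;
 *	int num_clks;
 *
 *	num_clks = devm_clk_bulk_get_all(dev, &clks);
 *	if (num_clks < 0)
 *		return num_clks;
 *
 *	ret = clk_bulk_prepare_enable(num_clks, clks);
 *	// ...
 *	clk_bulk_disable_unprepare(num_clks, clks);
 */
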
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
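
/*
 * Example (illustrative sketch): look up a clock directly from a device
 * tree node, e.g. in code that has no struct device yet.  The "ref" name
 * is a hypothetical clock-names entry.
 *
 *	clk = of_clk_get_by_name(np, "ref");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */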
#else
static inline struct clk *of_clk_get(struct device_node *np, int index)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_by_name(struct device_node *np,
					     const char *name)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif

#endif