blob: ab505247887013d8b5eac570c996e96627165f26 [file] [log] [blame]
/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * based on
 *
 * samsung/clk.c
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
22
23#include <linux/slab.h>
24#include <linux/clk.h>
25#include <linux/clk-provider.h>
Heiko Stübner90c59022014-07-03 01:59:10 +020026#include <linux/mfd/syscon.h>
27#include <linux/regmap.h>
Heiko Stübner6f1294b2014-08-19 17:45:38 -070028#include <linux/reboot.h>
Heiko Stübnera245fec2014-07-03 01:58:39 +020029#include "clk.h"
30
31/**
32 * Register a clock branch.
33 * Most clock branches have a form like
34 *
35 * src1 --|--\
36 * |M |--[GATE]-[DIV]-
37 * src2 --|--/
38 *
39 * sometimes without one of those components.
40 */
Heiko Stübner1a4b1812014-08-27 00:54:56 +020041static struct clk *rockchip_clk_register_branch(const char *name,
Uwe Kleine-König4a1caed2015-05-28 10:45:51 +020042 const char *const *parent_names, u8 num_parents, void __iomem *base,
Heiko Stübnera245fec2014-07-03 01:58:39 +020043 int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
44 u8 div_shift, u8 div_width, u8 div_flags,
45 struct clk_div_table *div_table, int gate_offset,
46 u8 gate_shift, u8 gate_flags, unsigned long flags,
47 spinlock_t *lock)
48{
49 struct clk *clk;
50 struct clk_mux *mux = NULL;
51 struct clk_gate *gate = NULL;
52 struct clk_divider *div = NULL;
53 const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
54 *gate_ops = NULL;
55
56 if (num_parents > 1) {
57 mux = kzalloc(sizeof(*mux), GFP_KERNEL);
58 if (!mux)
59 return ERR_PTR(-ENOMEM);
60
61 mux->reg = base + muxdiv_offset;
62 mux->shift = mux_shift;
63 mux->mask = BIT(mux_width) - 1;
64 mux->flags = mux_flags;
65 mux->lock = lock;
66 mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
67 : &clk_mux_ops;
68 }
69
70 if (gate_offset >= 0) {
71 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
72 if (!gate)
Shawn Lin2467b672016-02-02 11:37:50 +080073 goto err_gate;
Heiko Stübnera245fec2014-07-03 01:58:39 +020074
75 gate->flags = gate_flags;
76 gate->reg = base + gate_offset;
77 gate->bit_idx = gate_shift;
78 gate->lock = lock;
79 gate_ops = &clk_gate_ops;
80 }
81
82 if (div_width > 0) {
83 div = kzalloc(sizeof(*div), GFP_KERNEL);
84 if (!div)
Shawn Lin2467b672016-02-02 11:37:50 +080085 goto err_div;
Heiko Stübnera245fec2014-07-03 01:58:39 +020086
87 div->flags = div_flags;
88 div->reg = base + muxdiv_offset;
89 div->shift = div_shift;
90 div->width = div_width;
91 div->lock = lock;
92 div->table = div_table;
James Hogane6d5e7d2014-11-14 15:32:09 +000093 div_ops = &clk_divider_ops;
Heiko Stübnera245fec2014-07-03 01:58:39 +020094 }
95
96 clk = clk_register_composite(NULL, name, parent_names, num_parents,
97 mux ? &mux->hw : NULL, mux_ops,
98 div ? &div->hw : NULL, div_ops,
99 gate ? &gate->hw : NULL, gate_ops,
100 flags);
101
102 return clk;
Shawn Lin2467b672016-02-02 11:37:50 +0800103err_div:
104 kfree(gate);
105err_gate:
106 kfree(mux);
107 return ERR_PTR(-ENOMEM);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200108}
109
/*
 * Bundle of everything needed for a fractional-divider branch: the
 * divider itself, its optional gate, an optional downstream mux and the
 * notifier state used to re-parent that mux around rate changes.
 */
struct rockchip_clk_frac {
	struct notifier_block	clk_nb;		/* rate-change notifier on the frac clock */
	struct clk_fractional_divider	div;
	struct clk_gate		gate;

	struct clk_mux		mux;		/* optional child mux fed by this divider */
	const struct clk_ops	*mux_ops;
	int			mux_frac_idx;	/* parent index of the frac clock in the mux, -1 if not found */

	bool			rate_change_remuxed;	/* set while temporarily remuxed during a rate change */
	int			rate_change_idx;	/* parent index to restore after the rate change */
};

/* map an embedded notifier_block back to its rockchip_clk_frac */
#define to_rockchip_clk_frac_nb(nb) \
			container_of(nb, struct rockchip_clk_frac, clk_nb)
125
126static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
127 unsigned long event, void *data)
128{
129 struct clk_notifier_data *ndata = data;
130 struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
131 struct clk_mux *frac_mux = &frac->mux;
132 int ret = 0;
133
134 pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
135 __func__, event, ndata->old_rate, ndata->new_rate);
136 if (event == PRE_RATE_CHANGE) {
137 frac->rate_change_idx = frac->mux_ops->get_parent(&frac_mux->hw);
138 if (frac->rate_change_idx != frac->mux_frac_idx) {
139 frac->mux_ops->set_parent(&frac_mux->hw, frac->mux_frac_idx);
140 frac->rate_change_remuxed = 1;
141 }
142 } else if (event == POST_RATE_CHANGE) {
143 /*
144 * The POST_RATE_CHANGE notifier runs directly after the
145 * divider clock is set in clk_change_rate, so we'll have
146 * remuxed back to the original parent before clk_change_rate
147 * reaches the mux itself.
148 */
149 if (frac->rate_change_remuxed) {
150 frac->mux_ops->set_parent(&frac_mux->hw, frac->rate_change_idx);
151 frac->rate_change_remuxed = 0;
152 }
153 }
154
155 return notifier_from_errno(ret);
156}
157
/*
 * Register a fractional-divider branch (16-bit numerator/denominator in
 * one register) with an optional gate and an optional child mux.
 *
 * The divider and gate live inside one rockchip_clk_frac allocation; the
 * child mux, when present, is registered as a separate clock and a rate
 * notifier is installed on the divider so the mux is re-parented around
 * rate changes (see rockchip_clk_frac_notifier_cb).
 *
 * Returns the fractional-divider clock or an ERR_PTR.
 */
static struct clk *rockchip_clk_register_frac_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, struct rockchip_clk_branch *child,
		spinlock_t *lock)
{
	struct rockchip_clk_frac *frac;
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	/* the fractional divider register is mandatory */
	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	if (child && child->branch_type != branch_mux) {
		pr_err("%s: fractional child clock for %s can only be a mux\n",
		       __func__, name);
		return ERR_PTR(-EINVAL);
	}

	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
	if (!frac)
		return ERR_PTR(-ENOMEM);

	/* gate_offset < 0 means the branch has no gate */
	if (gate_offset >= 0) {
		gate = &frac->gate;
		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	/* numerator in bits [31:16], denominator in bits [15:0] */
	div = &frac->div;
	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mwidth = 16;
	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
	div->nshift = 0;
	div->nwidth = 16;
	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
	div->lock = lock;
	div_ops = &clk_fractional_divider_ops;

	/* keep the clock ungated while its rate is being changed */
	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags | CLK_SET_RATE_UNGATE);
	if (IS_ERR(clk)) {
		kfree(frac);
		return clk;
	}

	if (child) {
		struct clk_mux *frac_mux = &frac->mux;
		struct clk_init_data init;
		struct clk *mux_clk;
		int i, ret;

		/* find this frac clock among the mux parents, if present */
		frac->mux_frac_idx = -1;
		for (i = 0; i < child->num_parents; i++) {
			if (!strcmp(name, child->parent_names[i])) {
				pr_debug("%s: found fractional parent in mux at pos %d\n",
					 __func__, i);
				frac->mux_frac_idx = i;
				break;
			}
		}

		frac->mux_ops = &clk_mux_ops;
		frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;

		frac_mux->reg = base + child->muxdiv_offset;
		frac_mux->shift = child->mux_shift;
		frac_mux->mask = BIT(child->mux_width) - 1;
		frac_mux->flags = child->mux_flags;
		frac_mux->lock = lock;
		frac_mux->hw.init = &init;

		init.name = child->name;
		init.flags = child->flags | CLK_SET_RATE_PARENT;
		init.ops = frac->mux_ops;
		init.parent_names = child->parent_names;
		init.num_parents = child->num_parents;

		/*
		 * NOTE(review): a failed mux registration is silently
		 * ignored and the frac clock is returned anyway - presumably
		 * intentional best-effort behavior; confirm before changing.
		 */
		mux_clk = clk_register(NULL, &frac_mux->hw);
		if (IS_ERR(mux_clk))
			return clk;

		rockchip_clk_add_lookup(mux_clk, child->id);

		/* notifier on the fraction divider to catch rate changes */
		if (frac->mux_frac_idx >= 0) {
			ret = clk_notifier_register(clk, &frac->clk_nb);
			if (ret)
				pr_err("%s: failed to register clock notifier for %s\n",
				       __func__, name);
		} else {
			pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
				__func__, name, child->name);
		}
	}

	return clk;
}
267
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200268static struct clk *rockchip_clk_register_factor_branch(const char *name,
269 const char *const *parent_names, u8 num_parents,
270 void __iomem *base, unsigned int mult, unsigned int div,
271 int gate_offset, u8 gate_shift, u8 gate_flags,
272 unsigned long flags, spinlock_t *lock)
273{
274 struct clk *clk;
275 struct clk_gate *gate = NULL;
276 struct clk_fixed_factor *fix = NULL;
277
278 /* without gate, register a simple factor clock */
279 if (gate_offset == 0) {
280 return clk_register_fixed_factor(NULL, name,
281 parent_names[0], flags, mult,
282 div);
283 }
284
285 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
286 if (!gate)
287 return ERR_PTR(-ENOMEM);
288
289 gate->flags = gate_flags;
290 gate->reg = base + gate_offset;
291 gate->bit_idx = gate_shift;
292 gate->lock = lock;
293
294 fix = kzalloc(sizeof(*fix), GFP_KERNEL);
295 if (!fix) {
296 kfree(gate);
297 return ERR_PTR(-ENOMEM);
298 }
299
300 fix->mult = mult;
301 fix->div = div;
302
303 clk = clk_register_composite(NULL, name, parent_names, num_parents,
304 NULL, NULL,
305 &fix->hw, &clk_fixed_factor_ops,
306 &gate->hw, &clk_gate_ops, flags);
307 if (IS_ERR(clk)) {
308 kfree(fix);
309 kfree(gate);
310 }
311
312 return clk;
313}
314
static DEFINE_SPINLOCK(clk_lock);		/* protects shared mux/div/gate registers */
static struct clk **clk_table;			/* id -> clk lookup, filled by rockchip_clk_add_lookup() */
static void __iomem *reg_base;			/* mapped CRU register base */
static struct clk_onecell_data clk_data;	/* provider data handed to of_clk_add_provider() */
static struct device_node *cru_node;		/* CRU device-tree node, used for the grf phandle lookup */
static struct regmap *grf;			/* lazily resolved GRF regmap, see rockchip_clk_get_grf() */
Heiko Stübnera245fec2014-07-03 01:58:39 +0200321
322void __init rockchip_clk_init(struct device_node *np, void __iomem *base,
323 unsigned long nr_clks)
324{
325 reg_base = base;
Heiko Stübner90c59022014-07-03 01:59:10 +0200326 cru_node = np;
327 grf = ERR_PTR(-EPROBE_DEFER);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200328
329 clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
330 if (!clk_table)
331 pr_err("%s: could not allocate clock lookup table\n", __func__);
332
333 clk_data.clks = clk_table;
334 clk_data.clk_num = nr_clks;
335 of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
336}
337
Heiko Stübner90c59022014-07-03 01:59:10 +0200338struct regmap *rockchip_clk_get_grf(void)
339{
340 if (IS_ERR(grf))
341 grf = syscon_regmap_lookup_by_phandle(cru_node, "rockchip,grf");
342 return grf;
343}
344
Heiko Stübnera245fec2014-07-03 01:58:39 +0200345void rockchip_clk_add_lookup(struct clk *clk, unsigned int id)
346{
347 if (clk_table && id)
348 clk_table[id] = clk;
349}
350
Heiko Stübner90c59022014-07-03 01:59:10 +0200351void __init rockchip_clk_register_plls(struct rockchip_pll_clock *list,
352 unsigned int nr_pll, int grf_lock_offset)
353{
354 struct clk *clk;
355 int idx;
356
357 for (idx = 0; idx < nr_pll; idx++, list++) {
358 clk = rockchip_clk_register_pll(list->type, list->name,
359 list->parent_names, list->num_parents,
360 reg_base, list->con_offset, grf_lock_offset,
361 list->lock_shift, list->mode_offset,
Heiko Stuebner4f8a7c52014-11-20 20:38:50 +0100362 list->mode_shift, list->rate_table,
363 list->pll_flags, &clk_lock);
Heiko Stübner90c59022014-07-03 01:59:10 +0200364 if (IS_ERR(clk)) {
365 pr_err("%s: failed to register clock %s\n", __func__,
366 list->name);
367 continue;
368 }
369
370 rockchip_clk_add_lookup(clk, list->id);
371 }
372}
373
Heiko Stübnera245fec2014-07-03 01:58:39 +0200374void __init rockchip_clk_register_branches(
375 struct rockchip_clk_branch *list,
376 unsigned int nr_clk)
377{
378 struct clk *clk = NULL;
379 unsigned int idx;
380 unsigned long flags;
381
382 for (idx = 0; idx < nr_clk; idx++, list++) {
383 flags = list->flags;
384
385 /* catch simple muxes */
386 switch (list->branch_type) {
387 case branch_mux:
388 clk = clk_register_mux(NULL, list->name,
389 list->parent_names, list->num_parents,
390 flags, reg_base + list->muxdiv_offset,
391 list->mux_shift, list->mux_width,
392 list->mux_flags, &clk_lock);
393 break;
394 case branch_divider:
395 if (list->div_table)
396 clk = clk_register_divider_table(NULL,
397 list->name, list->parent_names[0],
398 flags, reg_base + list->muxdiv_offset,
399 list->div_shift, list->div_width,
400 list->div_flags, list->div_table,
401 &clk_lock);
402 else
403 clk = clk_register_divider(NULL, list->name,
404 list->parent_names[0], flags,
405 reg_base + list->muxdiv_offset,
406 list->div_shift, list->div_width,
407 list->div_flags, &clk_lock);
408 break;
409 case branch_fraction_divider:
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200410 clk = rockchip_clk_register_frac_branch(list->name,
411 list->parent_names, list->num_parents,
412 reg_base, list->muxdiv_offset, list->div_flags,
413 list->gate_offset, list->gate_shift,
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100414 list->gate_flags, flags, list->child,
415 &clk_lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200416 break;
417 case branch_gate:
418 flags |= CLK_SET_RATE_PARENT;
419
Heiko Stübnera245fec2014-07-03 01:58:39 +0200420 clk = clk_register_gate(NULL, list->name,
421 list->parent_names[0], flags,
422 reg_base + list->gate_offset,
423 list->gate_shift, list->gate_flags, &clk_lock);
424 break;
425 case branch_composite:
Heiko Stübnera245fec2014-07-03 01:58:39 +0200426 clk = rockchip_clk_register_branch(list->name,
427 list->parent_names, list->num_parents,
428 reg_base, list->muxdiv_offset, list->mux_shift,
429 list->mux_width, list->mux_flags,
430 list->div_shift, list->div_width,
431 list->div_flags, list->div_table,
432 list->gate_offset, list->gate_shift,
433 list->gate_flags, flags, &clk_lock);
434 break;
Alexandru M Stan89bf26c2014-11-26 17:30:27 -0800435 case branch_mmc:
436 clk = rockchip_clk_register_mmc(
437 list->name,
438 list->parent_names, list->num_parents,
439 reg_base + list->muxdiv_offset,
440 list->div_shift
441 );
442 break;
Heiko Stuebner8a76f442015-07-05 11:00:14 +0200443 case branch_inverter:
444 clk = rockchip_clk_register_inverter(
445 list->name, list->parent_names,
446 list->num_parents,
447 reg_base + list->muxdiv_offset,
448 list->div_shift, list->div_flags, &clk_lock);
449 break;
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200450 case branch_factor:
451 clk = rockchip_clk_register_factor_branch(
452 list->name, list->parent_names,
453 list->num_parents, reg_base,
454 list->div_shift, list->div_width,
455 list->gate_offset, list->gate_shift,
456 list->gate_flags, flags, &clk_lock);
457 break;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200458 }
459
460 /* none of the cases above matched */
461 if (!clk) {
462 pr_err("%s: unknown clock type %d\n",
463 __func__, list->branch_type);
464 continue;
465 }
466
467 if (IS_ERR(clk)) {
468 pr_err("%s: failed to register clock %s: %ld\n",
469 __func__, list->name, PTR_ERR(clk));
470 continue;
471 }
472
473 rockchip_clk_add_lookup(clk, list->id);
474 }
475}
Heiko Stübnerfe94f972014-08-14 23:00:26 +0200476
Heiko Stuebnerf6fba5f2014-09-04 22:10:43 +0200477void __init rockchip_clk_register_armclk(unsigned int lookup_id,
Uwe Kleine-König4a1caed2015-05-28 10:45:51 +0200478 const char *name, const char *const *parent_names,
Heiko Stuebnerf6fba5f2014-09-04 22:10:43 +0200479 u8 num_parents,
480 const struct rockchip_cpuclk_reg_data *reg_data,
481 const struct rockchip_cpuclk_rate_table *rates,
482 int nrates)
483{
484 struct clk *clk;
485
486 clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
487 reg_data, rates, nrates, reg_base,
488 &clk_lock);
489 if (IS_ERR(clk)) {
490 pr_err("%s: failed to register clock %s: %ld\n",
491 __func__, name, PTR_ERR(clk));
492 return;
493 }
494
495 rockchip_clk_add_lookup(clk, lookup_id);
496}
497
Uwe Kleine-König692d8322015-02-18 10:59:45 +0100498void __init rockchip_clk_protect_critical(const char *const clocks[],
499 int nclocks)
Heiko Stübnerfe94f972014-08-14 23:00:26 +0200500{
501 int i;
502
503 /* Protect the clocks that needs to stay on */
504 for (i = 0; i < nclocks; i++) {
505 struct clk *clk = __clk_lookup(clocks[i]);
506
507 if (clk)
508 clk_prepare_enable(clk);
509 }
510}
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700511
static unsigned int reg_restart;	/* CRU offset of the restart register */
static void (*cb_restart)(void);	/* optional SoC hook run before triggering restart */
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700514static int rockchip_restart_notify(struct notifier_block *this,
515 unsigned long mode, void *cmd)
516{
Heiko Stuebnerdfff24b2015-12-18 17:51:55 +0100517 if (cb_restart)
518 cb_restart();
519
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700520 writel(0xfdb9, reg_base + reg_restart);
521 return NOTIFY_DONE;
522}
523
/* restart handler; priority 128 = default, no special ordering needed */
static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};
528
Heiko Stuebnerdfff24b2015-12-18 17:51:55 +0100529void __init rockchip_register_restart_notifier(unsigned int reg, void (*cb)(void))
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700530{
531 int ret;
532
533 reg_restart = reg;
Heiko Stuebnerdfff24b2015-12-18 17:51:55 +0100534 cb_restart = cb;
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700535 ret = register_restart_handler(&rockchip_restart_handler);
536 if (ret)
537 pr_err("%s: cannot register restart handler, %d\n",
538 __func__, ret);
539}