/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
 * Author: Xing Zheng <zhengxing@rock-chips.com>
 *
 * based on
 *
 * samsung/clk.c
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
25
26#include <linux/slab.h>
27#include <linux/clk.h>
28#include <linux/clk-provider.h>
Heiko Stübner90c59022014-07-03 01:59:10 +020029#include <linux/mfd/syscon.h>
30#include <linux/regmap.h>
Heiko Stübner6f1294b2014-08-19 17:45:38 -070031#include <linux/reboot.h>
Heiko Stübnera245fec2014-07-03 01:58:39 +020032#include "clk.h"
33
34/**
35 * Register a clock branch.
36 * Most clock branches have a form like
37 *
38 * src1 --|--\
39 * |M |--[GATE]-[DIV]-
40 * src2 --|--/
41 *
42 * sometimes without one of those components.
43 */
Heiko Stübner1a4b1812014-08-27 00:54:56 +020044static struct clk *rockchip_clk_register_branch(const char *name,
Heiko Stuebner03ae1742016-04-19 21:29:27 +020045 const char *const *parent_names, u8 num_parents,
46 void __iomem *base,
Heiko Stübnera245fec2014-07-03 01:58:39 +020047 int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
48 u8 div_shift, u8 div_width, u8 div_flags,
49 struct clk_div_table *div_table, int gate_offset,
50 u8 gate_shift, u8 gate_flags, unsigned long flags,
51 spinlock_t *lock)
52{
53 struct clk *clk;
54 struct clk_mux *mux = NULL;
55 struct clk_gate *gate = NULL;
56 struct clk_divider *div = NULL;
57 const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
58 *gate_ops = NULL;
59
60 if (num_parents > 1) {
61 mux = kzalloc(sizeof(*mux), GFP_KERNEL);
62 if (!mux)
63 return ERR_PTR(-ENOMEM);
64
65 mux->reg = base + muxdiv_offset;
66 mux->shift = mux_shift;
67 mux->mask = BIT(mux_width) - 1;
68 mux->flags = mux_flags;
69 mux->lock = lock;
70 mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
71 : &clk_mux_ops;
72 }
73
74 if (gate_offset >= 0) {
75 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
76 if (!gate)
Shawn Lin2467b672016-02-02 11:37:50 +080077 goto err_gate;
Heiko Stübnera245fec2014-07-03 01:58:39 +020078
79 gate->flags = gate_flags;
80 gate->reg = base + gate_offset;
81 gate->bit_idx = gate_shift;
82 gate->lock = lock;
83 gate_ops = &clk_gate_ops;
84 }
85
86 if (div_width > 0) {
87 div = kzalloc(sizeof(*div), GFP_KERNEL);
88 if (!div)
Shawn Lin2467b672016-02-02 11:37:50 +080089 goto err_div;
Heiko Stübnera245fec2014-07-03 01:58:39 +020090
91 div->flags = div_flags;
92 div->reg = base + muxdiv_offset;
93 div->shift = div_shift;
94 div->width = div_width;
95 div->lock = lock;
96 div->table = div_table;
Heiko Stuebner50359812016-01-21 21:53:09 +010097 div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
98 ? &clk_divider_ro_ops
99 : &clk_divider_ops;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200100 }
101
102 clk = clk_register_composite(NULL, name, parent_names, num_parents,
103 mux ? &mux->hw : NULL, mux_ops,
104 div ? &div->hw : NULL, div_ops,
105 gate ? &gate->hw : NULL, gate_ops,
106 flags);
107
108 return clk;
Shawn Lin2467b672016-02-02 11:37:50 +0800109err_div:
110 kfree(gate);
111err_gate:
112 kfree(mux);
113 return ERR_PTR(-ENOMEM);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200114}
115
/*
 * Bookkeeping for a fractional divider branch and its optional downstream
 * mux child.  The embedded notifier temporarily re-muxes the child onto the
 * fractional divider around rate changes (see rockchip_clk_frac_notifier_cb).
 */
struct rockchip_clk_frac {
	struct notifier_block	clk_nb;		/* rate-change notifier on the frac clk */
	struct clk_fractional_divider	div;	/* the fractional divider itself */
	struct clk_gate		gate;		/* optional gate (used when gate_offset >= 0) */

	struct clk_mux		mux;		/* optional child mux fed by this divider */
	const struct clk_ops	*mux_ops;	/* ops used to poke the child mux directly */
	int			mux_frac_idx;	/* parent index of the frac clk in the mux, -1 if absent */

	bool			rate_change_remuxed;	/* true while temporarily re-muxed */
	int			rate_change_idx;	/* parent index to restore afterwards */
};

/* map an embedded notifier_block back to its rockchip_clk_frac */
#define to_rockchip_clk_frac_nb(nb) \
			container_of(nb, struct rockchip_clk_frac, clk_nb)
131
132static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
133 unsigned long event, void *data)
134{
135 struct clk_notifier_data *ndata = data;
136 struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
137 struct clk_mux *frac_mux = &frac->mux;
138 int ret = 0;
139
140 pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
141 __func__, event, ndata->old_rate, ndata->new_rate);
142 if (event == PRE_RATE_CHANGE) {
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200143 frac->rate_change_idx =
144 frac->mux_ops->get_parent(&frac_mux->hw);
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100145 if (frac->rate_change_idx != frac->mux_frac_idx) {
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200146 frac->mux_ops->set_parent(&frac_mux->hw,
147 frac->mux_frac_idx);
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100148 frac->rate_change_remuxed = 1;
149 }
150 } else if (event == POST_RATE_CHANGE) {
151 /*
152 * The POST_RATE_CHANGE notifier runs directly after the
153 * divider clock is set in clk_change_rate, so we'll have
154 * remuxed back to the original parent before clk_change_rate
155 * reaches the mux itself.
156 */
157 if (frac->rate_change_remuxed) {
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200158 frac->mux_ops->set_parent(&frac_mux->hw,
159 frac->rate_change_idx);
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100160 frac->rate_change_remuxed = 0;
161 }
162 }
163
164 return notifier_from_errno(ret);
165}
166
/*
 * Register a fractional divider branch (optionally gated), plus an optional
 * child mux that can select this divider as one of its parents.
 *
 * Returns the fractional-divider clk on success, or an ERR_PTR.  Note that
 * a failure to register the child mux is deliberately not treated as fatal:
 * the already-registered divider clk is returned anyway.
 */
static struct clk *rockchip_clk_register_frac_branch(
		struct rockchip_clk_provider *ctx, const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, struct rockchip_clk_branch *child,
		spinlock_t *lock)
{
	struct rockchip_clk_frac *frac;
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	/* the divider register offset is mandatory */
	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	/* only muxes are supported as children (see notifier above) */
	if (child && child->branch_type != branch_mux) {
		pr_err("%s: fractional child clock for %s can only be a mux\n",
		       __func__, name);
		return ERR_PTR(-EINVAL);
	}

	/* one allocation holds divider, gate, mux and notifier state */
	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
	if (!frac)
		return ERR_PTR(-ENOMEM);

	if (gate_offset >= 0) {
		gate = &frac->gate;
		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	/* numerator in bits [31:16], denominator in bits [15:0] */
	div = &frac->div;
	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mwidth = 16;
	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
	div->nshift = 0;
	div->nwidth = 16;
	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
	div->lock = lock;
	div_ops = &clk_fractional_divider_ops;

	/*
	 * CLK_SET_RATE_UNGATE: the divider register only latches reliably
	 * while the clock is running — presumably a hardware requirement;
	 * TODO(review): confirm against the SoC TRM.
	 */
	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags | CLK_SET_RATE_UNGATE);
	if (IS_ERR(clk)) {
		kfree(frac);
		return clk;
	}

	if (child) {
		struct clk_mux *frac_mux = &frac->mux;
		struct clk_init_data init;
		struct clk *mux_clk;
		int i, ret;

		/* find which mux parent index refers to this divider */
		frac->mux_frac_idx = -1;
		for (i = 0; i < child->num_parents; i++) {
			if (!strcmp(name, child->parent_names[i])) {
				pr_debug("%s: found fractional parent in mux at pos %d\n",
					 __func__, i);
				frac->mux_frac_idx = i;
				break;
			}
		}

		frac->mux_ops = &clk_mux_ops;
		frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;

		frac_mux->reg = base + child->muxdiv_offset;
		frac_mux->shift = child->mux_shift;
		frac_mux->mask = BIT(child->mux_width) - 1;
		frac_mux->flags = child->mux_flags;
		frac_mux->lock = lock;
		/* init lives on the stack; clk_register() copies what it needs */
		frac_mux->hw.init = &init;

		init.name = child->name;
		init.flags = child->flags | CLK_SET_RATE_PARENT;
		init.ops = frac->mux_ops;
		init.parent_names = child->parent_names;
		init.num_parents = child->num_parents;

		/* mux failure is non-fatal: still return the divider clk */
		mux_clk = clk_register(NULL, &frac_mux->hw);
		if (IS_ERR(mux_clk))
			return clk;

		rockchip_clk_add_lookup(ctx, mux_clk, child->id);

		/* notifier on the fraction divider to catch rate changes */
		if (frac->mux_frac_idx >= 0) {
			ret = clk_notifier_register(clk, &frac->clk_nb);
			if (ret)
				pr_err("%s: failed to register clock notifier for %s\n",
				       __func__, name);
		} else {
			pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
				__func__, name, child->name);
		}
	}

	return clk;
}
277
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200278static struct clk *rockchip_clk_register_factor_branch(const char *name,
279 const char *const *parent_names, u8 num_parents,
280 void __iomem *base, unsigned int mult, unsigned int div,
281 int gate_offset, u8 gate_shift, u8 gate_flags,
282 unsigned long flags, spinlock_t *lock)
283{
284 struct clk *clk;
285 struct clk_gate *gate = NULL;
286 struct clk_fixed_factor *fix = NULL;
287
288 /* without gate, register a simple factor clock */
289 if (gate_offset == 0) {
290 return clk_register_fixed_factor(NULL, name,
291 parent_names[0], flags, mult,
292 div);
293 }
294
295 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
296 if (!gate)
297 return ERR_PTR(-ENOMEM);
298
299 gate->flags = gate_flags;
300 gate->reg = base + gate_offset;
301 gate->bit_idx = gate_shift;
302 gate->lock = lock;
303
304 fix = kzalloc(sizeof(*fix), GFP_KERNEL);
305 if (!fix) {
306 kfree(gate);
307 return ERR_PTR(-ENOMEM);
308 }
309
310 fix->mult = mult;
311 fix->div = div;
312
313 clk = clk_register_composite(NULL, name, parent_names, num_parents,
314 NULL, NULL,
315 &fix->hw, &clk_fixed_factor_ops,
316 &gate->hw, &clk_gate_ops, flags);
317 if (IS_ERR(clk)) {
318 kfree(fix);
319 kfree(gate);
320 }
321
322 return clk;
323}
324
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800325struct rockchip_clk_provider * __init rockchip_clk_init(struct device_node *np,
326 void __iomem *base, unsigned long nr_clks)
Heiko Stübnera245fec2014-07-03 01:58:39 +0200327{
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800328 struct rockchip_clk_provider *ctx;
329 struct clk **clk_table;
330 int i;
331
332 ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200333 if (!ctx)
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800334 return ERR_PTR(-ENOMEM);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200335
336 clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200337 if (!clk_table)
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800338 goto err_free;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200339
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800340 for (i = 0; i < nr_clks; ++i)
341 clk_table[i] = ERR_PTR(-ENOENT);
342
343 ctx->reg_base = base;
344 ctx->clk_data.clks = clk_table;
345 ctx->clk_data.clk_num = nr_clks;
346 ctx->cru_node = np;
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800347 spin_lock_init(&ctx->lock);
348
Heiko Stuebner6f339dc2016-03-15 16:40:32 +0100349 ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
350 "rockchip,grf");
351
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800352 return ctx;
353
354err_free:
355 kfree(ctx);
356 return ERR_PTR(-ENOMEM);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200357}
358
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800359void __init rockchip_clk_of_add_provider(struct device_node *np,
360 struct rockchip_clk_provider *ctx)
Heiko Stübner90c59022014-07-03 01:59:10 +0200361{
Shawn Linff1ae202016-03-13 00:25:53 +0800362 if (of_clk_add_provider(np, of_clk_src_onecell_get,
363 &ctx->clk_data))
364 pr_err("%s: could not register clk provider\n", __func__);
Heiko Stübner90c59022014-07-03 01:59:10 +0200365}
366
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800367void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
368 struct clk *clk, unsigned int id)
369{
370 if (ctx->clk_data.clks && id)
371 ctx->clk_data.clks[id] = clk;
372}
373
374void __init rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
375 struct rockchip_pll_clock *list,
Heiko Stübner90c59022014-07-03 01:59:10 +0200376 unsigned int nr_pll, int grf_lock_offset)
377{
378 struct clk *clk;
379 int idx;
380
381 for (idx = 0; idx < nr_pll; idx++, list++) {
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800382 clk = rockchip_clk_register_pll(ctx, list->type, list->name,
Heiko Stübner90c59022014-07-03 01:59:10 +0200383 list->parent_names, list->num_parents,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800384 list->con_offset, grf_lock_offset,
Heiko Stübner90c59022014-07-03 01:59:10 +0200385 list->lock_shift, list->mode_offset,
Heiko Stuebner4f8a7c52014-11-20 20:38:50 +0100386 list->mode_shift, list->rate_table,
Heiko Stübnere6cebc72016-07-29 15:56:55 +0800387 list->flags, list->pll_flags);
Heiko Stübner90c59022014-07-03 01:59:10 +0200388 if (IS_ERR(clk)) {
389 pr_err("%s: failed to register clock %s\n", __func__,
390 list->name);
391 continue;
392 }
393
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800394 rockchip_clk_add_lookup(ctx, clk, list->id);
Heiko Stübner90c59022014-07-03 01:59:10 +0200395 }
396}
397
Heiko Stübnera245fec2014-07-03 01:58:39 +0200398void __init rockchip_clk_register_branches(
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800399 struct rockchip_clk_provider *ctx,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200400 struct rockchip_clk_branch *list,
401 unsigned int nr_clk)
402{
403 struct clk *clk = NULL;
404 unsigned int idx;
405 unsigned long flags;
406
407 for (idx = 0; idx < nr_clk; idx++, list++) {
408 flags = list->flags;
409
410 /* catch simple muxes */
411 switch (list->branch_type) {
412 case branch_mux:
413 clk = clk_register_mux(NULL, list->name,
414 list->parent_names, list->num_parents,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800415 flags, ctx->reg_base + list->muxdiv_offset,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200416 list->mux_shift, list->mux_width,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800417 list->mux_flags, &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200418 break;
Heiko Stuebnercb1d9f62016-12-27 00:00:38 +0100419 case branch_muxgrf:
420 clk = rockchip_clk_register_muxgrf(list->name,
421 list->parent_names, list->num_parents,
422 flags, ctx->grf, list->muxdiv_offset,
423 list->mux_shift, list->mux_width,
424 list->mux_flags);
425 break;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200426 case branch_divider:
427 if (list->div_table)
428 clk = clk_register_divider_table(NULL,
429 list->name, list->parent_names[0],
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200430 flags,
431 ctx->reg_base + list->muxdiv_offset,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200432 list->div_shift, list->div_width,
433 list->div_flags, list->div_table,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800434 &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200435 else
436 clk = clk_register_divider(NULL, list->name,
437 list->parent_names[0], flags,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800438 ctx->reg_base + list->muxdiv_offset,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200439 list->div_shift, list->div_width,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800440 list->div_flags, &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200441 break;
442 case branch_fraction_divider:
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800443 clk = rockchip_clk_register_frac_branch(ctx, list->name,
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200444 list->parent_names, list->num_parents,
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200445 ctx->reg_base, list->muxdiv_offset,
446 list->div_flags,
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200447 list->gate_offset, list->gate_shift,
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100448 list->gate_flags, flags, list->child,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800449 &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200450 break;
451 case branch_gate:
452 flags |= CLK_SET_RATE_PARENT;
453
Heiko Stübnera245fec2014-07-03 01:58:39 +0200454 clk = clk_register_gate(NULL, list->name,
455 list->parent_names[0], flags,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800456 ctx->reg_base + list->gate_offset,
457 list->gate_shift, list->gate_flags, &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200458 break;
459 case branch_composite:
Heiko Stübnera245fec2014-07-03 01:58:39 +0200460 clk = rockchip_clk_register_branch(list->name,
461 list->parent_names, list->num_parents,
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200462 ctx->reg_base, list->muxdiv_offset,
463 list->mux_shift,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200464 list->mux_width, list->mux_flags,
465 list->div_shift, list->div_width,
466 list->div_flags, list->div_table,
467 list->gate_offset, list->gate_shift,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800468 list->gate_flags, flags, &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200469 break;
Alexandru M Stan89bf26c2014-11-26 17:30:27 -0800470 case branch_mmc:
471 clk = rockchip_clk_register_mmc(
472 list->name,
473 list->parent_names, list->num_parents,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800474 ctx->reg_base + list->muxdiv_offset,
Alexandru M Stan89bf26c2014-11-26 17:30:27 -0800475 list->div_shift
476 );
477 break;
Heiko Stuebner8a76f442015-07-05 11:00:14 +0200478 case branch_inverter:
479 clk = rockchip_clk_register_inverter(
480 list->name, list->parent_names,
481 list->num_parents,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800482 ctx->reg_base + list->muxdiv_offset,
483 list->div_shift, list->div_flags, &ctx->lock);
Heiko Stuebner8a76f442015-07-05 11:00:14 +0200484 break;
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200485 case branch_factor:
486 clk = rockchip_clk_register_factor_branch(
487 list->name, list->parent_names,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800488 list->num_parents, ctx->reg_base,
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200489 list->div_shift, list->div_width,
490 list->gate_offset, list->gate_shift,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800491 list->gate_flags, flags, &ctx->lock);
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200492 break;
Lin Huanga4f182b2016-08-22 11:36:17 +0800493 case branch_ddrclk:
494 clk = rockchip_clk_register_ddrclk(
495 list->name, list->flags,
496 list->parent_names, list->num_parents,
497 list->muxdiv_offset, list->mux_shift,
498 list->mux_width, list->div_shift,
499 list->div_width, list->div_flags,
500 ctx->reg_base, &ctx->lock);
501 break;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200502 }
503
504 /* none of the cases above matched */
505 if (!clk) {
506 pr_err("%s: unknown clock type %d\n",
507 __func__, list->branch_type);
508 continue;
509 }
510
511 if (IS_ERR(clk)) {
512 pr_err("%s: failed to register clock %s: %ld\n",
513 __func__, list->name, PTR_ERR(clk));
514 continue;
515 }
516
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800517 rockchip_clk_add_lookup(ctx, clk, list->id);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200518 }
519}
Heiko Stübnerfe94f972014-08-14 23:00:26 +0200520
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800521void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
522 unsigned int lookup_id,
Uwe Kleine-König4a1caed2015-05-28 10:45:51 +0200523 const char *name, const char *const *parent_names,
Heiko Stuebnerf6fba5f2014-09-04 22:10:43 +0200524 u8 num_parents,
525 const struct rockchip_cpuclk_reg_data *reg_data,
526 const struct rockchip_cpuclk_rate_table *rates,
527 int nrates)
528{
529 struct clk *clk;
530
531 clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200532 reg_data, rates, nrates,
533 ctx->reg_base, &ctx->lock);
Heiko Stuebnerf6fba5f2014-09-04 22:10:43 +0200534 if (IS_ERR(clk)) {
535 pr_err("%s: failed to register clock %s: %ld\n",
536 __func__, name, PTR_ERR(clk));
537 return;
538 }
539
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800540 rockchip_clk_add_lookup(ctx, clk, lookup_id);
Heiko Stuebnerf6fba5f2014-09-04 22:10:43 +0200541}
542
Uwe Kleine-König692d8322015-02-18 10:59:45 +0100543void __init rockchip_clk_protect_critical(const char *const clocks[],
544 int nclocks)
Heiko Stübnerfe94f972014-08-14 23:00:26 +0200545{
546 int i;
547
548 /* Protect the clocks that needs to stay on */
549 for (i = 0; i < nclocks; i++) {
550 struct clk *clk = __clk_lookup(clocks[i]);
551
552 if (clk)
553 clk_prepare_enable(clk);
554 }
555}
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700556
/* state for the system restart handler, set up by
 * rockchip_register_restart_notifier() below */
static void __iomem *rst_base;		/* CRU register base */
static unsigned int reg_restart;	/* offset of the soft-reset register */
static void (*cb_restart)(void);	/* optional SoC-specific pre-reset hook */

/*
 * Restart handler: run the optional callback, then write the reset value
 * into the CRU soft-reset register to reboot the SoC.
 */
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	if (cb_restart)
		cb_restart();

	/* NOTE(review): 0xfdb9 appears to be the magic value the CRU
	 * soft-reset register expects — confirm against the SoC TRM */
	writel(0xfdb9, rst_base + reg_restart);
	return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,	/* higher than the default 0 priority handlers */
};
574
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200575void __init
576rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
577 unsigned int reg,
578 void (*cb)(void))
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700579{
580 int ret;
581
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800582 rst_base = ctx->reg_base;
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700583 reg_restart = reg;
Heiko Stuebnerdfff24b2015-12-18 17:51:55 +0100584 cb_restart = cb;
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700585 ret = register_restart_handler(&rockchip_restart_handler);
586 if (ret)
587 pr_err("%s: cannot register restart handler, %d\n",
588 __func__, ret);
589}