blob: 326b3fa44f5dad454948bcb6da0f1e11948cb91f [file] [log] [blame]
Heiko Stübnera245fec2014-07-03 01:58:39 +02001/*
2 * Copyright (c) 2014 MundoReader S.L.
3 * Author: Heiko Stuebner <heiko@sntech.de>
4 *
Xing Zhengef1d9fe2016-03-09 10:37:04 +08005 * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
6 * Author: Xing Zheng <zhengxing@rock-chips.com>
7 *
Heiko Stübnera245fec2014-07-03 01:58:39 +02008 * based on
9 *
10 * samsung/clk.c
11 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
12 * Copyright (c) 2013 Linaro Ltd.
13 * Author: Thomas Abraham <thomas.ab@samsung.com>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 */
25
26#include <linux/slab.h>
27#include <linux/clk.h>
28#include <linux/clk-provider.h>
Heiko Stübner90c59022014-07-03 01:59:10 +020029#include <linux/mfd/syscon.h>
30#include <linux/regmap.h>
Heiko Stübner6f1294b2014-08-19 17:45:38 -070031#include <linux/reboot.h>
Elaine Zhang5d890c22017-08-01 18:22:24 +020032#include <linux/rational.h>
Heiko Stübnera245fec2014-07-03 01:58:39 +020033#include "clk.h"
34
35/**
36 * Register a clock branch.
37 * Most clock branches have a form like
38 *
39 * src1 --|--\
40 * |M |--[GATE]-[DIV]-
41 * src2 --|--/
42 *
43 * sometimes without one of those components.
44 */
Heiko Stübner1a4b1812014-08-27 00:54:56 +020045static struct clk *rockchip_clk_register_branch(const char *name,
Heiko Stuebner03ae1742016-04-19 21:29:27 +020046 const char *const *parent_names, u8 num_parents,
47 void __iomem *base,
Heiko Stübnera245fec2014-07-03 01:58:39 +020048 int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
49 u8 div_shift, u8 div_width, u8 div_flags,
50 struct clk_div_table *div_table, int gate_offset,
51 u8 gate_shift, u8 gate_flags, unsigned long flags,
52 spinlock_t *lock)
53{
54 struct clk *clk;
55 struct clk_mux *mux = NULL;
56 struct clk_gate *gate = NULL;
57 struct clk_divider *div = NULL;
58 const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
59 *gate_ops = NULL;
Shawn Linfd3cbbf2018-02-28 14:56:48 +080060 int ret;
Heiko Stübnera245fec2014-07-03 01:58:39 +020061
62 if (num_parents > 1) {
63 mux = kzalloc(sizeof(*mux), GFP_KERNEL);
64 if (!mux)
65 return ERR_PTR(-ENOMEM);
66
67 mux->reg = base + muxdiv_offset;
68 mux->shift = mux_shift;
69 mux->mask = BIT(mux_width) - 1;
70 mux->flags = mux_flags;
71 mux->lock = lock;
72 mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
73 : &clk_mux_ops;
74 }
75
76 if (gate_offset >= 0) {
77 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
Shawn Linfd3cbbf2018-02-28 14:56:48 +080078 if (!gate) {
79 ret = -ENOMEM;
Shawn Lin2467b672016-02-02 11:37:50 +080080 goto err_gate;
Shawn Linfd3cbbf2018-02-28 14:56:48 +080081 }
Heiko Stübnera245fec2014-07-03 01:58:39 +020082
83 gate->flags = gate_flags;
84 gate->reg = base + gate_offset;
85 gate->bit_idx = gate_shift;
86 gate->lock = lock;
87 gate_ops = &clk_gate_ops;
88 }
89
90 if (div_width > 0) {
91 div = kzalloc(sizeof(*div), GFP_KERNEL);
Shawn Linfd3cbbf2018-02-28 14:56:48 +080092 if (!div) {
93 ret = -ENOMEM;
Shawn Lin2467b672016-02-02 11:37:50 +080094 goto err_div;
Shawn Linfd3cbbf2018-02-28 14:56:48 +080095 }
Heiko Stübnera245fec2014-07-03 01:58:39 +020096
97 div->flags = div_flags;
98 div->reg = base + muxdiv_offset;
99 div->shift = div_shift;
100 div->width = div_width;
101 div->lock = lock;
102 div->table = div_table;
Heiko Stuebner50359812016-01-21 21:53:09 +0100103 div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
104 ? &clk_divider_ro_ops
105 : &clk_divider_ops;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200106 }
107
108 clk = clk_register_composite(NULL, name, parent_names, num_parents,
109 mux ? &mux->hw : NULL, mux_ops,
110 div ? &div->hw : NULL, div_ops,
111 gate ? &gate->hw : NULL, gate_ops,
112 flags);
113
Shawn Linfd3cbbf2018-02-28 14:56:48 +0800114 if (IS_ERR(clk)) {
115 ret = PTR_ERR(clk);
116 goto err_composite;
117 }
118
Heiko Stübnera245fec2014-07-03 01:58:39 +0200119 return clk;
Shawn Linfd3cbbf2018-02-28 14:56:48 +0800120err_composite:
121 kfree(div);
Shawn Lin2467b672016-02-02 11:37:50 +0800122err_div:
123 kfree(gate);
124err_gate:
125 kfree(mux);
Shawn Linfd3cbbf2018-02-28 14:56:48 +0800126 return ERR_PTR(ret);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200127}
128
/*
 * State for a fractional-divider branch: the divider itself, its gate and
 * an optional downstream mux, plus the notifier bookkeeping used to
 * temporarily re-mux to the fractional divider during rate changes.
 */
struct rockchip_clk_frac {
	struct notifier_block clk_nb;		/* rate-change notifier on the divider */
	struct clk_fractional_divider div;
	struct clk_gate gate;

	struct clk_mux mux;			/* optional child mux (see register_frac_branch) */
	const struct clk_ops *mux_ops;
	int mux_frac_idx;			/* parent index of the frac divider inside the mux */

	bool rate_change_remuxed;		/* did PRE_RATE_CHANGE switch the mux? */
	int rate_change_idx;			/* parent to restore in POST_RATE_CHANGE */
};

/* map a notifier_block back to its containing rockchip_clk_frac */
#define to_rockchip_clk_frac_nb(nb) \
			container_of(nb, struct rockchip_clk_frac, clk_nb)
144
/*
 * Rate-change notifier on the fractional divider: while the divider's rate
 * is being changed, force the child mux onto the fractional-divider parent
 * and restore the previous parent selection afterwards.
 */
static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
	struct clk_mux *frac_mux = &frac->mux;
	int ret = 0;

	pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
		 __func__, event, ndata->old_rate, ndata->new_rate);
	if (event == PRE_RATE_CHANGE) {
		/* remember the current parent and switch to the frac divider */
		frac->rate_change_idx =
				frac->mux_ops->get_parent(&frac_mux->hw);
		if (frac->rate_change_idx != frac->mux_frac_idx) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->mux_frac_idx);
			frac->rate_change_remuxed = 1;
		}
	} else if (event == POST_RATE_CHANGE) {
		/*
		 * The POST_RATE_CHANGE notifier runs directly after the
		 * divider clock is set in clk_change_rate, so we'll have
		 * remuxed back to the original parent before clk_change_rate
		 * reaches the mux itself.
		 */
		if (frac->rate_change_remuxed) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->rate_change_idx);
			frac->rate_change_remuxed = 0;
		}
	}

	return notifier_from_errno(ret);
}
179
/*
 * The fractional divider needs the denominator to be at least 20 times
 * larger than the numerator to generate a precise clock frequency.
 * (Plain comment, not kernel-doc: static helper with no doc-comment body.)
 */
static void rockchip_fractional_approximation(struct clk_hw *hw,
		unsigned long rate, unsigned long *parent_rate,
		unsigned long *m, unsigned long *n)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long p_rate, p_parent_rate;
	struct clk_hw *p_parent;
	unsigned long scale;

	/*
	 * If the requested rate is too close to the parent rate to keep the
	 * 20x margin and doesn't divide it evenly, switch the reference to
	 * the grandparent's rate instead.
	 */
	p_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
	if ((rate * 20 > p_rate) && (p_rate % rate != 0)) {
		p_parent = clk_hw_get_parent(clk_hw_get_parent(hw));
		p_parent_rate = clk_hw_get_rate(p_parent);
		*parent_rate = p_parent_rate;
	}

	/*
	 * Get rate closer to *parent_rate to guarantee there is no overflow
	 * for m and n. In the result it will be the nearest rate left shifted
	 * by (scale - fd->nwidth) bits.
	 *
	 * NOTE(review): this assumes *parent_rate >= rate; otherwise
	 * *parent_rate / rate - 1 underflows to a huge unsigned value and
	 * the shift below misbehaves -- TODO confirm callers guarantee this.
	 */
	scale = fls_long(*parent_rate / rate - 1);
	if (scale > fd->nwidth)
		rate <<= scale - fd->nwidth;

	rational_best_approximation(rate, *parent_rate,
			GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
			m, n);
}
213
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800214static struct clk *rockchip_clk_register_frac_branch(
215 struct rockchip_clk_provider *ctx, const char *name,
Uwe Kleine-König4a1caed2015-05-28 10:45:51 +0200216 const char *const *parent_names, u8 num_parents,
217 void __iomem *base, int muxdiv_offset, u8 div_flags,
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200218 int gate_offset, u8 gate_shift, u8 gate_flags,
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100219 unsigned long flags, struct rockchip_clk_branch *child,
220 spinlock_t *lock)
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200221{
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100222 struct rockchip_clk_frac *frac;
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200223 struct clk *clk;
224 struct clk_gate *gate = NULL;
225 struct clk_fractional_divider *div = NULL;
226 const struct clk_ops *div_ops = NULL, *gate_ops = NULL;
227
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100228 if (muxdiv_offset < 0)
229 return ERR_PTR(-EINVAL);
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200230
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100231 if (child && child->branch_type != branch_mux) {
232 pr_err("%s: fractional child clock for %s can only be a mux\n",
233 __func__, name);
234 return ERR_PTR(-EINVAL);
235 }
236
237 frac = kzalloc(sizeof(*frac), GFP_KERNEL);
238 if (!frac)
239 return ERR_PTR(-ENOMEM);
240
241 if (gate_offset >= 0) {
242 gate = &frac->gate;
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200243 gate->flags = gate_flags;
244 gate->reg = base + gate_offset;
245 gate->bit_idx = gate_shift;
246 gate->lock = lock;
247 gate_ops = &clk_gate_ops;
248 }
249
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100250 div = &frac->div;
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200251 div->flags = div_flags;
252 div->reg = base + muxdiv_offset;
253 div->mshift = 16;
Andy Shevchenko5d49a6e2015-09-22 18:54:10 +0300254 div->mwidth = 16;
255 div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200256 div->nshift = 0;
Andy Shevchenko5d49a6e2015-09-22 18:54:10 +0300257 div->nwidth = 16;
258 div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200259 div->lock = lock;
Elaine Zhang5d890c22017-08-01 18:22:24 +0200260 div->approximation = rockchip_fractional_approximation;
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200261 div_ops = &clk_fractional_divider_ops;
262
263 clk = clk_register_composite(NULL, name, parent_names, num_parents,
264 NULL, NULL,
265 &div->hw, div_ops,
266 gate ? &gate->hw : NULL, gate_ops,
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100267 flags | CLK_SET_RATE_UNGATE);
268 if (IS_ERR(clk)) {
269 kfree(frac);
270 return clk;
271 }
272
273 if (child) {
274 struct clk_mux *frac_mux = &frac->mux;
275 struct clk_init_data init;
276 struct clk *mux_clk;
Yisheng Xiea4257022018-05-21 19:57:50 +0800277 int ret;
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100278
Yisheng Xiea4257022018-05-21 19:57:50 +0800279 frac->mux_frac_idx = match_string(child->parent_names,
280 child->num_parents, name);
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100281 frac->mux_ops = &clk_mux_ops;
282 frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;
283
284 frac_mux->reg = base + child->muxdiv_offset;
285 frac_mux->shift = child->mux_shift;
286 frac_mux->mask = BIT(child->mux_width) - 1;
287 frac_mux->flags = child->mux_flags;
288 frac_mux->lock = lock;
289 frac_mux->hw.init = &init;
290
291 init.name = child->name;
292 init.flags = child->flags | CLK_SET_RATE_PARENT;
293 init.ops = frac->mux_ops;
294 init.parent_names = child->parent_names;
295 init.num_parents = child->num_parents;
296
297 mux_clk = clk_register(NULL, &frac_mux->hw);
Shawn Linfd3cbbf2018-02-28 14:56:48 +0800298 if (IS_ERR(mux_clk)) {
299 kfree(frac);
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100300 return clk;
Shawn Linfd3cbbf2018-02-28 14:56:48 +0800301 }
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100302
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800303 rockchip_clk_add_lookup(ctx, mux_clk, child->id);
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100304
305 /* notifier on the fraction divider to catch rate changes */
306 if (frac->mux_frac_idx >= 0) {
Yisheng Xiea4257022018-05-21 19:57:50 +0800307 pr_debug("%s: found fractional parent in mux at pos %d\n",
308 __func__, frac->mux_frac_idx);
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100309 ret = clk_notifier_register(clk, &frac->clk_nb);
310 if (ret)
311 pr_err("%s: failed to register clock notifier for %s\n",
312 __func__, name);
313 } else {
314 pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
315 __func__, name, child->name);
316 }
317 }
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200318
319 return clk;
320}
321
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200322static struct clk *rockchip_clk_register_factor_branch(const char *name,
323 const char *const *parent_names, u8 num_parents,
324 void __iomem *base, unsigned int mult, unsigned int div,
325 int gate_offset, u8 gate_shift, u8 gate_flags,
326 unsigned long flags, spinlock_t *lock)
327{
328 struct clk *clk;
329 struct clk_gate *gate = NULL;
330 struct clk_fixed_factor *fix = NULL;
331
332 /* without gate, register a simple factor clock */
333 if (gate_offset == 0) {
334 return clk_register_fixed_factor(NULL, name,
335 parent_names[0], flags, mult,
336 div);
337 }
338
339 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
340 if (!gate)
341 return ERR_PTR(-ENOMEM);
342
343 gate->flags = gate_flags;
344 gate->reg = base + gate_offset;
345 gate->bit_idx = gate_shift;
346 gate->lock = lock;
347
348 fix = kzalloc(sizeof(*fix), GFP_KERNEL);
349 if (!fix) {
350 kfree(gate);
351 return ERR_PTR(-ENOMEM);
352 }
353
354 fix->mult = mult;
355 fix->div = div;
356
357 clk = clk_register_composite(NULL, name, parent_names, num_parents,
358 NULL, NULL,
359 &fix->hw, &clk_fixed_factor_ops,
360 &gate->hw, &clk_gate_ops, flags);
361 if (IS_ERR(clk)) {
362 kfree(fix);
363 kfree(gate);
364 }
365
366 return clk;
367}
368
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800369struct rockchip_clk_provider * __init rockchip_clk_init(struct device_node *np,
370 void __iomem *base, unsigned long nr_clks)
Heiko Stübnera245fec2014-07-03 01:58:39 +0200371{
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800372 struct rockchip_clk_provider *ctx;
373 struct clk **clk_table;
374 int i;
375
376 ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200377 if (!ctx)
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800378 return ERR_PTR(-ENOMEM);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200379
380 clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200381 if (!clk_table)
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800382 goto err_free;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200383
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800384 for (i = 0; i < nr_clks; ++i)
385 clk_table[i] = ERR_PTR(-ENOENT);
386
387 ctx->reg_base = base;
388 ctx->clk_data.clks = clk_table;
389 ctx->clk_data.clk_num = nr_clks;
390 ctx->cru_node = np;
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800391 spin_lock_init(&ctx->lock);
392
Heiko Stuebner6f339dc2016-03-15 16:40:32 +0100393 ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
394 "rockchip,grf");
395
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800396 return ctx;
397
398err_free:
399 kfree(ctx);
400 return ERR_PTR(-ENOMEM);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200401}
402
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800403void __init rockchip_clk_of_add_provider(struct device_node *np,
404 struct rockchip_clk_provider *ctx)
Heiko Stübner90c59022014-07-03 01:59:10 +0200405{
Shawn Linff1ae202016-03-13 00:25:53 +0800406 if (of_clk_add_provider(np, of_clk_src_onecell_get,
407 &ctx->clk_data))
408 pr_err("%s: could not register clk provider\n", __func__);
Heiko Stübner90c59022014-07-03 01:59:10 +0200409}
410
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800411void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
412 struct clk *clk, unsigned int id)
413{
414 if (ctx->clk_data.clks && id)
415 ctx->clk_data.clks[id] = clk;
416}
417
418void __init rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
419 struct rockchip_pll_clock *list,
Heiko Stübner90c59022014-07-03 01:59:10 +0200420 unsigned int nr_pll, int grf_lock_offset)
421{
422 struct clk *clk;
423 int idx;
424
425 for (idx = 0; idx < nr_pll; idx++, list++) {
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800426 clk = rockchip_clk_register_pll(ctx, list->type, list->name,
Heiko Stübner90c59022014-07-03 01:59:10 +0200427 list->parent_names, list->num_parents,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800428 list->con_offset, grf_lock_offset,
Heiko Stübner90c59022014-07-03 01:59:10 +0200429 list->lock_shift, list->mode_offset,
Heiko Stuebner4f8a7c52014-11-20 20:38:50 +0100430 list->mode_shift, list->rate_table,
Heiko Stübnere6cebc72016-07-29 15:56:55 +0800431 list->flags, list->pll_flags);
Heiko Stübner90c59022014-07-03 01:59:10 +0200432 if (IS_ERR(clk)) {
433 pr_err("%s: failed to register clock %s\n", __func__,
434 list->name);
435 continue;
436 }
437
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800438 rockchip_clk_add_lookup(ctx, clk, list->id);
Heiko Stübner90c59022014-07-03 01:59:10 +0200439 }
440}
441
Heiko Stübnera245fec2014-07-03 01:58:39 +0200442void __init rockchip_clk_register_branches(
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800443 struct rockchip_clk_provider *ctx,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200444 struct rockchip_clk_branch *list,
445 unsigned int nr_clk)
446{
447 struct clk *clk = NULL;
448 unsigned int idx;
449 unsigned long flags;
450
451 for (idx = 0; idx < nr_clk; idx++, list++) {
452 flags = list->flags;
453
454 /* catch simple muxes */
455 switch (list->branch_type) {
456 case branch_mux:
457 clk = clk_register_mux(NULL, list->name,
458 list->parent_names, list->num_parents,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800459 flags, ctx->reg_base + list->muxdiv_offset,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200460 list->mux_shift, list->mux_width,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800461 list->mux_flags, &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200462 break;
Heiko Stuebnercb1d9f62016-12-27 00:00:38 +0100463 case branch_muxgrf:
464 clk = rockchip_clk_register_muxgrf(list->name,
465 list->parent_names, list->num_parents,
466 flags, ctx->grf, list->muxdiv_offset,
467 list->mux_shift, list->mux_width,
468 list->mux_flags);
469 break;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200470 case branch_divider:
471 if (list->div_table)
472 clk = clk_register_divider_table(NULL,
473 list->name, list->parent_names[0],
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200474 flags,
475 ctx->reg_base + list->muxdiv_offset,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200476 list->div_shift, list->div_width,
477 list->div_flags, list->div_table,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800478 &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200479 else
480 clk = clk_register_divider(NULL, list->name,
481 list->parent_names[0], flags,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800482 ctx->reg_base + list->muxdiv_offset,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200483 list->div_shift, list->div_width,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800484 list->div_flags, &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200485 break;
486 case branch_fraction_divider:
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800487 clk = rockchip_clk_register_frac_branch(ctx, list->name,
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200488 list->parent_names, list->num_parents,
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200489 ctx->reg_base, list->muxdiv_offset,
490 list->div_flags,
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200491 list->gate_offset, list->gate_shift,
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100492 list->gate_flags, flags, list->child,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800493 &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200494 break;
495 case branch_gate:
496 flags |= CLK_SET_RATE_PARENT;
497
Heiko Stübnera245fec2014-07-03 01:58:39 +0200498 clk = clk_register_gate(NULL, list->name,
499 list->parent_names[0], flags,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800500 ctx->reg_base + list->gate_offset,
501 list->gate_shift, list->gate_flags, &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200502 break;
503 case branch_composite:
Heiko Stübnera245fec2014-07-03 01:58:39 +0200504 clk = rockchip_clk_register_branch(list->name,
505 list->parent_names, list->num_parents,
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200506 ctx->reg_base, list->muxdiv_offset,
507 list->mux_shift,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200508 list->mux_width, list->mux_flags,
509 list->div_shift, list->div_width,
510 list->div_flags, list->div_table,
511 list->gate_offset, list->gate_shift,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800512 list->gate_flags, flags, &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200513 break;
Alexandru M Stan89bf26c2014-11-26 17:30:27 -0800514 case branch_mmc:
515 clk = rockchip_clk_register_mmc(
516 list->name,
517 list->parent_names, list->num_parents,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800518 ctx->reg_base + list->muxdiv_offset,
Alexandru M Stan89bf26c2014-11-26 17:30:27 -0800519 list->div_shift
520 );
521 break;
Heiko Stuebner8a76f442015-07-05 11:00:14 +0200522 case branch_inverter:
523 clk = rockchip_clk_register_inverter(
524 list->name, list->parent_names,
525 list->num_parents,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800526 ctx->reg_base + list->muxdiv_offset,
527 list->div_shift, list->div_flags, &ctx->lock);
Heiko Stuebner8a76f442015-07-05 11:00:14 +0200528 break;
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200529 case branch_factor:
530 clk = rockchip_clk_register_factor_branch(
531 list->name, list->parent_names,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800532 list->num_parents, ctx->reg_base,
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200533 list->div_shift, list->div_width,
534 list->gate_offset, list->gate_shift,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800535 list->gate_flags, flags, &ctx->lock);
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200536 break;
Lin Huanga4f182b2016-08-22 11:36:17 +0800537 case branch_ddrclk:
538 clk = rockchip_clk_register_ddrclk(
539 list->name, list->flags,
540 list->parent_names, list->num_parents,
541 list->muxdiv_offset, list->mux_shift,
542 list->mux_width, list->div_shift,
543 list->div_width, list->div_flags,
544 ctx->reg_base, &ctx->lock);
545 break;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200546 }
547
548 /* none of the cases above matched */
549 if (!clk) {
550 pr_err("%s: unknown clock type %d\n",
551 __func__, list->branch_type);
552 continue;
553 }
554
555 if (IS_ERR(clk)) {
556 pr_err("%s: failed to register clock %s: %ld\n",
557 __func__, list->name, PTR_ERR(clk));
558 continue;
559 }
560
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800561 rockchip_clk_add_lookup(ctx, clk, list->id);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200562 }
563}
Heiko Stübnerfe94f972014-08-14 23:00:26 +0200564
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800565void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
566 unsigned int lookup_id,
Uwe Kleine-König4a1caed2015-05-28 10:45:51 +0200567 const char *name, const char *const *parent_names,
Heiko Stuebnerf6fba5f2014-09-04 22:10:43 +0200568 u8 num_parents,
569 const struct rockchip_cpuclk_reg_data *reg_data,
570 const struct rockchip_cpuclk_rate_table *rates,
571 int nrates)
572{
573 struct clk *clk;
574
575 clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200576 reg_data, rates, nrates,
577 ctx->reg_base, &ctx->lock);
Heiko Stuebnerf6fba5f2014-09-04 22:10:43 +0200578 if (IS_ERR(clk)) {
579 pr_err("%s: failed to register clock %s: %ld\n",
580 __func__, name, PTR_ERR(clk));
581 return;
582 }
583
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800584 rockchip_clk_add_lookup(ctx, clk, lookup_id);
Heiko Stuebnerf6fba5f2014-09-04 22:10:43 +0200585}
586
Uwe Kleine-König692d8322015-02-18 10:59:45 +0100587void __init rockchip_clk_protect_critical(const char *const clocks[],
588 int nclocks)
Heiko Stübnerfe94f972014-08-14 23:00:26 +0200589{
590 int i;
591
592 /* Protect the clocks that needs to stay on */
593 for (i = 0; i < nclocks; i++) {
594 struct clk *clk = __clk_lookup(clocks[i]);
595
596 if (clk)
597 clk_prepare_enable(clk);
598 }
599}
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700600
/* state shared with the restart notifier below */
static void __iomem *rst_base;		/* CRU register base */
static unsigned int reg_restart;	/* offset of the global soft-reset register */
static void (*cb_restart)(void);	/* optional SoC-specific pre-reset hook */

static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	if (cb_restart)
		cb_restart();

	/* 0xfdb9 is the magic value that triggers the global soft-reset */
	writel(0xfdb9, rst_base + reg_restart);
	return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};
618
/*
 * Install a restart handler that soft-resets the SoC through the CRU.
 * @reg is the register offset the reset magic gets written to, @cb an
 * optional callback run right before triggering the reset.
 */
void __init
rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
				   unsigned int reg,
				   void (*cb)(void))
{
	int ret;

	rst_base = ctx->reg_base;
	reg_restart = reg;
	cb_restart = cb;
	ret = register_restart_handler(&rockchip_restart_handler);
	if (ret)
		pr_err("%s: cannot register restart handler, %d\n",
		       __func__, ret);
}