blob: ff8f8a17715602c6f8c6d02009f047124451f093 [file] [log] [blame]
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001/*
2 * linux/drivers/clocksource/arm_arch_timer.c
3 *
4 * Copyright (C) 2011 ARM Ltd.
5 * All Rights Reserved
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
Marc Zyngierf005bd72016-08-01 10:54:15 +010011
12#define pr_fmt(fmt) "arm_arch_timer: " fmt
13
Mark Rutland8a4da6e2012-11-12 14:33:44 +000014#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/device.h>
17#include <linux/smp.h>
18#include <linux/cpu.h>
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +010019#include <linux/cpu_pm.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000020#include <linux/clockchips.h>
Richard Cochran7c8f1e72015-01-06 14:26:13 +010021#include <linux/clocksource.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000022#include <linux/interrupt.h>
23#include <linux/of_irq.h>
Stephen Boyd22006992013-07-18 16:59:32 -070024#include <linux/of_address.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000025#include <linux/io.h>
Stephen Boyd22006992013-07-18 16:59:32 -070026#include <linux/slab.h>
Ingo Molnare6017572017-02-01 16:36:40 +010027#include <linux/sched/clock.h>
Stephen Boyd65cd4f62013-07-18 16:21:18 -070028#include <linux/sched_clock.h>
Hanjun Guob09ca1e2015-03-24 14:02:50 +000029#include <linux/acpi.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000030
31#include <asm/arch_timer.h>
Marc Zyngier82668912013-01-10 11:13:07 +000032#include <asm/virt.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000033
34#include <clocksource/arm_arch_timer.h>
35
Fu Weided24012017-01-18 21:25:25 +080036#undef pr_fmt
37#define pr_fmt(fmt) "arch_timer: " fmt
38
/* Registers in the CNTCTLBase control frame of a memory-mapped timer */
#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

/* Per-frame access control register and its permission bits */
#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

/* Registers in a CNTBase timer frame */
#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c
57
/* Which timer flavours were found during early probe -- presumably a
 * bitmask of ARCH_TIMER_TYPE_* values; TODO confirm against callers. */
static unsigned arch_timers_present __initdata;

/* CNTBase frame used for memory-mapped counter reads, if any */
static void __iomem *arch_counter_base;

/* Per-frame state for a memory-mapped timer */
struct arch_timer {
	void __iomem *base;		/* CNTBase frame registers */
	struct clock_event_device evt;
};

/* Recover the enclosing struct arch_timer from its embedded clockevent */
#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
68
static u32 arch_timer_rate;		/* counter frequency in Hz */
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

/* PPI the per-CPU clockevent uses; virtual timer by default */
static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop;		/* timer stops in deep idle states */
static bool arch_timer_mem_use_virtual;	/* mem frame: virtual vs physical timer */
static bool arch_counter_suspend_stop;	/* counter stops across suspend */
static bool vdso_default = true;	/* vdso may read the counter directly */

static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
81
82static int __init early_evtstrm_cfg(char *buf)
83{
84 return strtobool(buf, &evtstrm_enable);
85}
86early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
87
Mark Rutland8a4da6e2012-11-12 14:33:44 +000088/*
89 * Architected system timer support.
90 */
91
Marc Zyngierf4e00a12017-01-20 18:28:32 +000092static __always_inline
93void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
94 struct clock_event_device *clk)
95{
96 if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
97 struct arch_timer *timer = to_arch_timer(clk);
98 switch (reg) {
99 case ARCH_TIMER_REG_CTRL:
100 writel_relaxed(val, timer->base + CNTP_CTL);
101 break;
102 case ARCH_TIMER_REG_TVAL:
103 writel_relaxed(val, timer->base + CNTP_TVAL);
104 break;
105 }
106 } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
107 struct arch_timer *timer = to_arch_timer(clk);
108 switch (reg) {
109 case ARCH_TIMER_REG_CTRL:
110 writel_relaxed(val, timer->base + CNTV_CTL);
111 break;
112 case ARCH_TIMER_REG_TVAL:
113 writel_relaxed(val, timer->base + CNTV_TVAL);
114 break;
115 }
116 } else {
117 arch_timer_reg_write_cp15(access, reg, val);
118 }
119}
120
121static __always_inline
122u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
123 struct clock_event_device *clk)
124{
125 u32 val;
126
127 if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
128 struct arch_timer *timer = to_arch_timer(clk);
129 switch (reg) {
130 case ARCH_TIMER_REG_CTRL:
131 val = readl_relaxed(timer->base + CNTP_CTL);
132 break;
133 case ARCH_TIMER_REG_TVAL:
134 val = readl_relaxed(timer->base + CNTP_TVAL);
135 break;
136 }
137 } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
138 struct arch_timer *timer = to_arch_timer(clk);
139 switch (reg) {
140 case ARCH_TIMER_REG_CTRL:
141 val = readl_relaxed(timer->base + CNTV_CTL);
142 break;
143 case ARCH_TIMER_REG_TVAL:
144 val = readl_relaxed(timer->base + CNTV_TVAL);
145 break;
146 }
147 } else {
148 val = arch_timer_reg_read_cp15(access, reg);
149 }
150
151 return val;
152}
153
/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;

/* clocksource read hook: defer to the currently selected accessor */
static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

/* cyclecounter read hook: same counter, different framework */
static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

/* The counter is exposed as 56 bits wide to both frameworks */
static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter __ro_after_init = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};
184
/* ACPI OEM identification used to match errata on ACPI-booted systems */
struct ate_acpi_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};
190
#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

/* Erratum-safe readers: re-read until two consecutive reads agree */
static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntpct_el0(void)
{
	return __fsl_a008585_read_reg(cntpct_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif
230
#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * Verify whether the value of the second read is larger than the first by
 * less than 32 is the only way to confirm the value is correct, so clear the
 * lower 5 bits to check whether the difference is greater than 32 or not.
 * Theoretically the erratum should not occur more than twice in succession
 * when reading the system counter, but it is possible that some interrupts
 * may lead to more than twice read errors, triggering the warning, so setting
 * the number of retries far beyond the number of iterations the loop has been
 * observed to take.
 */
#define __hisi_161010101_read_reg(reg) ({			\
	u64 _old, _new;						\
	int _retries = 50;					\
								\
	do {							\
		_old = read_sysreg(reg);			\
		_new = read_sysreg(reg);			\
		_retries--;					\
	} while (unlikely((_new - _old) >> 5) && _retries);	\
								\
	WARN_ON_ONCE(!_retries);				\
	_new;							\
})

/* Erratum-safe readers: re-read until two reads are close enough */
static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

static u64 notrace hisi_161010101_read_cntpct_el0(void)
{
	return __hisi_161010101_read_reg(cntpct_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}

/* Affected platforms when booting with ACPI, matched on OEM table info */
static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
	/*
	 * Note that trailing spaces are required to properly match
	 * the OEM table information.
	 */
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP05   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP06   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	},
	{ /* Sentinel indicating the end of the OEM array */ },
};
#endif
299
#ifdef CONFIG_ARM64_ERRATUM_858921
/*
 * ARM erratum 858921: read the counter twice; if the two reads disagree
 * in bit 32 (the low word wrapped in between), the first read is the
 * one to trust.
 */
static u64 notrace arm64_858921_read_cntpct_el0(void)
{
	u64 first, second;

	first = read_sysreg(cntpct_el0);
	second = read_sysreg(cntpct_el0);
	if (((first ^ second) >> 32) & 1)
		return first;
	return second;
}

static u64 notrace arm64_858921_read_cntvct_el0(void)
{
	u64 first, second;

	first = read_sysreg(cntvct_el0);
	second = read_sysreg(cntvct_el0);
	if (((first ^ second) >> 32) & 1)
		return first;
	return second;
}
#endif
319
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
/* Per-CPU pointer to the active counter workaround (NULL when none) */
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *,
	       timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

/* Flipped on once any out-of-line accessor workaround becomes active */
DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
327
/*
 * Erratum-safe set_next_event: program the comparator (CVAL) with an
 * absolute deadline instead of writing the downcounter (TVAL).
 * The deadline is computed from the virtual counter in both cases.
 */
static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
						struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval = evt + arch_counter_get_cntvct();

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		write_sysreg(cval, cntp_cval_el0);
	else
		write_sysreg(cval, cntv_cval_el0);

	/* Enable and unmask only after the comparator is programmed */
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
345
/* CVAL-based replacement for set_next_event on the virtual timer */
static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
							   struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

/* CVAL-based replacement for set_next_event on the physical timer */
static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
							   struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}
359
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000360static const struct arch_timer_erratum_workaround ool_workarounds[] = {
361#ifdef CONFIG_FSL_ERRATUM_A008585
362 {
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000363 .match_type = ate_match_dt,
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000364 .id = "fsl,erratum-a008585",
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000365 .desc = "Freescale erratum a005858",
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000366 .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
367 .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
Christoffer Dallf2e600c2017-10-18 13:06:25 +0200368 .read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000369 .read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000370 .set_next_event_phys = erratum_set_next_event_tval_phys,
371 .set_next_event_virt = erratum_set_next_event_tval_virt,
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000372 },
373#endif
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000374#ifdef CONFIG_HISILICON_ERRATUM_161010101
375 {
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000376 .match_type = ate_match_dt,
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000377 .id = "hisilicon,erratum-161010101",
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000378 .desc = "HiSilicon erratum 161010101",
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000379 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
380 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
Christoffer Dallf2e600c2017-10-18 13:06:25 +0200381 .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000382 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000383 .set_next_event_phys = erratum_set_next_event_tval_phys,
384 .set_next_event_virt = erratum_set_next_event_tval_virt,
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000385 },
Marc Zyngierd003d022017-02-21 15:04:27 +0000386 {
387 .match_type = ate_match_acpi_oem_info,
388 .id = hisi_161010101_oem_info,
389 .desc = "HiSilicon erratum 161010101",
390 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
391 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
Christoffer Dallf2e600c2017-10-18 13:06:25 +0200392 .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
Marc Zyngierd003d022017-02-21 15:04:27 +0000393 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
394 .set_next_event_phys = erratum_set_next_event_tval_phys,
395 .set_next_event_virt = erratum_set_next_event_tval_virt,
396 },
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000397#endif
Marc Zyngierfa8d8152017-01-27 12:52:31 +0000398#ifdef CONFIG_ARM64_ERRATUM_858921
399 {
400 .match_type = ate_match_local_cap_id,
401 .id = (void *)ARM64_WORKAROUND_858921,
402 .desc = "ARM erratum 858921",
Christoffer Dallf2e600c2017-10-18 13:06:25 +0200403 .read_cntpct_el0 = arm64_858921_read_cntpct_el0,
Marc Zyngierfa8d8152017-01-27 12:52:31 +0000404 .read_cntvct_el0 = arm64_858921_read_cntvct_el0,
405 },
406#endif
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000407};
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000408
/* Predicate deciding whether a workaround entry applies, given @arg */
typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
			       const void *);

/* DT match: the timer node carries the erratum property named in wa->id */
static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
				 const void *arg)
{
	const struct device_node *np = arg;

	return of_property_read_bool(np, wa->id);
}
420
/* Local match: wa->id encodes an arm64 capability checked on this CPU */
static
bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
					const void *arg)
{
	return this_cpu_has_cap((uintptr_t)wa->id);
}
427
Marc Zyngier5a38bca2017-02-21 14:37:30 +0000428
429static
430bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
431 const void *arg)
432{
433 static const struct ate_acpi_oem_info empty_oem_info = {};
434 const struct ate_acpi_oem_info *info = wa->id;
435 const struct acpi_table_header *table = arg;
436
437 /* Iterate over the ACPI OEM info array, looking for a match */
438 while (memcmp(info, &empty_oem_info, sizeof(*info))) {
439 if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
440 !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
441 info->oem_revision == table->oem_revision)
442 return true;
443
444 info++;
445 }
446
447 return false;
448}
449
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000450static const struct arch_timer_erratum_workaround *
451arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
452 ate_match_fn_t match_fn,
453 void *arg)
454{
455 int i;
456
457 for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
458 if (ool_workarounds[i].match_type != type)
459 continue;
460
461 if (match_fn(&ool_workarounds[i], arg))
462 return &ool_workarounds[i];
463 }
464
465 return NULL;
466}
467
/*
 * Install @wa as the active workaround -- on this CPU only when @local,
 * otherwise on every possible CPU -- and enable the out-of-line
 * accessor static key.
 */
static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
				  bool local)
{
	int i;

	if (local) {
		__this_cpu_write(timer_unstable_counter_workaround, wa);
	} else {
		for_each_possible_cpu(i)
			per_cpu(timer_unstable_counter_workaround, i) = wa;
	}

	/*
	 * Use the locked version, as we're called from the CPU
	 * hotplug framework. Otherwise, we end-up in deadlock-land.
	 */
	static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);

	/*
	 * Don't use the vdso fastpath if errata require using the
	 * out-of-line counter accessor. We may change our mind pretty
	 * late in the game (with a per-CPU erratum, for example), so
	 * change both the default value and the vdso itself.
	 */
	if (wa->read_cntvct_el0) {
		clocksource_counter.archdata.vdso_direct = false;
		vdso_default = false;
	}
}
498
499static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
500 void *arg)
501{
502 const struct arch_timer_erratum_workaround *wa;
503 ate_match_fn_t match_fn = NULL;
Marc Zyngier00640302017-03-20 16:47:59 +0000504 bool local = false;
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000505
506 switch (type) {
507 case ate_match_dt:
508 match_fn = arch_timer_check_dt_erratum;
509 break;
Marc Zyngier00640302017-03-20 16:47:59 +0000510 case ate_match_local_cap_id:
511 match_fn = arch_timer_check_local_cap_erratum;
512 local = true;
513 break;
Marc Zyngier5a38bca2017-02-21 14:37:30 +0000514 case ate_match_acpi_oem_info:
515 match_fn = arch_timer_check_acpi_oem_erratum;
516 break;
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000517 default:
518 WARN_ON(1);
519 return;
520 }
521
522 wa = arch_timer_iterate_errata(type, match_fn, arg);
523 if (!wa)
524 return;
525
Marc Zyngier00640302017-03-20 16:47:59 +0000526 if (needs_unstable_timer_counter_workaround()) {
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000527 const struct arch_timer_erratum_workaround *__wa;
528 __wa = __this_cpu_read(timer_unstable_counter_workaround);
529 if (__wa && wa != __wa)
Marc Zyngier00640302017-03-20 16:47:59 +0000530 pr_warn("Can't enable workaround for %s (clashes with %s\n)",
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000531 wa->desc, __wa->desc);
532
533 if (__wa)
534 return;
Marc Zyngier00640302017-03-20 16:47:59 +0000535 }
536
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000537 arch_timer_enable_workaround(wa, local);
Marc Zyngier00640302017-03-20 16:47:59 +0000538 pr_info("Enabling %s workaround for %s\n",
539 local ? "local" : "global", wa->desc);
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000540}
541
/*
 * If this CPU's active workaround provides callback @fn, invoke it with
 * the remaining arguments, store its return value in @r and evaluate to
 * true. Evaluates to false (leaving @r untouched) when no workaround
 * callback applies.
 */
#define erratum_handler(fn, r, ...)					\
({									\
	bool __val;							\
	if (needs_unstable_timer_counter_workaround()) {		\
		const struct arch_timer_erratum_workaround *__wa;	\
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		if (__wa && __wa->fn) {					\
			r = __wa->fn(__VA_ARGS__);			\
			__val = true;					\
		} else {						\
			__val = false;					\
		}							\
	} else {							\
		__val = false;						\
	}								\
	__val;								\
})
559
/* True if this CPU's active workaround replaces the CNTVCT_EL0 reader */
static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
	const struct arch_timer_erratum_workaround *wa;

	wa = __this_cpu_read(timer_unstable_counter_workaround);
	return wa && wa->read_cntvct_el0;
}
#else
/* No-op stubs when out-of-line workarounds are compiled out; the
 * erratum set_next_event paths must never be reached in that case. */
#define arch_timer_check_ool_workaround(t,a)		do { } while(0)
#define erratum_set_next_event_tval_virt(...)		({BUG(); 0;})
#define erratum_set_next_event_tval_phys(...)		({BUG(); 0;})
#define erratum_handler(fn, r, ...)			({false;})
#define arch_timer_this_cpu_has_cntvct_wa()		({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
Scott Woodf6dc1572016-09-22 03:35:17 -0500574
Stephen Boyde09f3cc2013-07-18 16:59:28 -0700575static __always_inline irqreturn_t timer_handler(const int access,
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000576 struct clock_event_device *evt)
577{
578 unsigned long ctrl;
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200579
Stephen Boyd60faddf2013-07-18 16:59:31 -0700580 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000581 if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
582 ctrl |= ARCH_TIMER_CTRL_IT_MASK;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700583 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000584 evt->event_handler(evt);
585 return IRQ_HANDLED;
586 }
587
588 return IRQ_NONE;
589}
590
591static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
592{
593 struct clock_event_device *evt = dev_id;
594
595 return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
596}
597
598static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
599{
600 struct clock_event_device *evt = dev_id;
601
602 return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
603}
604
Stephen Boyd22006992013-07-18 16:59:32 -0700605static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
606{
607 struct clock_event_device *evt = dev_id;
608
609 return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
610}
611
612static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
613{
614 struct clock_event_device *evt = dev_id;
615
616 return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
617}
618
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530619static __always_inline int timer_shutdown(const int access,
620 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000621{
622 unsigned long ctrl;
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530623
624 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
625 ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
626 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
627
628 return 0;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000629}
630
/* clockevents set_state_shutdown callbacks, one per timer flavour */
static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}
650
/*
 * Program the downcounter (TVAL), then enable and unmask the timer.
 * TVAL is written before the CTRL enable/unmask write.
 */
static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
661
/* cp15 virtual timer: try the erratum hook first, else the TVAL path */
static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_virt, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

/* cp15 physical timer: try the erratum hook first, else the TVAL path */
static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_phys, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}
685
/* set_next_event for the memory-mapped virtual timer frame */
static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

/* set_next_event for the memory-mapped physical timer frame */
static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}
699
/*
 * Populate and register a clock_event_device for either the per-CPU
 * system-register timer (ARCH_TIMER_TYPE_CP15) or a memory-mapped
 * timer frame, selecting the callbacks matching the flavour in use.
 */
static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_TIMER_TYPE_CP15) {
		/* C3STOP: firmware says the timer stops in deep idle */
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case ARCH_TIMER_VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case ARCH_TIMER_PHYS_SECURE_PPI:
		case ARCH_TIMER_PHYS_NONSECURE_PPI:
		case ARCH_TIMER_HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
			break;
		default:
			BUG();
		}

		/* CPU-local errata can only be matched on the running CPU */
		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_all_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	/* Start from a known, disabled state before registration */
	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}
752
/*
 * Enable the virtual event stream on this CPU: program the trigger-bit
 * index (@divider) into CNTKCTL and advertise EVTSTRM in the hwcaps.
 */
static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	/* 32-bit tasks get the same capability bit */
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}
767
/*
 * Derive the event-stream divider from the counter frequency so the
 * stream ticks at roughly ARCH_TIMER_EVT_STREAM_FREQ.
 *
 * NOTE(review): 'pos' is an fls() result (log2 + 1) and is passed
 * straight through to the CNTKCTL EVNTI field by
 * arch_timer_evtstrm_enable(); confirm against the ARM ARM whether
 * EVNTI expects log2(divider) -- this looks like it could be off by one.
 */
static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, pos;

	/* Find the closest power of two to the divisor */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;
	/* enable event stream */
	arch_timer_evtstrm_enable(min(pos, 15));
}
780
/*
 * Configure CNTKCTL for this CPU: strip all userspace access, then
 * re-grant direct virtual-counter reads unless an erratum workaround
 * requires trapping them into the kernel.
 */
static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and both counters */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/*
	 * Enable user access to the virtual counter if it doesn't
	 * need to be worked around. The vdso may have already been
	 * disabled though.
	 */
	if (arch_timer_this_cpu_has_cntvct_wa())
		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
	else
		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}
805
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000806static bool arch_timer_has_nonsecure_ppi(void)
807{
Fu Weiee34f1e2017-01-18 21:25:27 +0800808 return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
809 arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000810}
811
Marc Zyngierf005bd72016-08-01 10:54:15 +0100812static u32 check_ppi_trigger(int irq)
813{
814 u32 flags = irq_get_trigger_type(irq);
815
816 if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
817 pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
818 pr_warn("WARNING: Please fix your firmware\n");
819 flags = IRQF_TRIGGER_LOW;
820 }
821
822 return flags;
823}
824
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000825static int arch_timer_starting_cpu(unsigned int cpu)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000826{
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000827 struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
Marc Zyngierf005bd72016-08-01 10:54:15 +0100828 u32 flags;
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000829
Fu Wei8a5c21d2017-01-18 21:25:26 +0800830 __arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000831
Marc Zyngierf005bd72016-08-01 10:54:15 +0100832 flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
833 enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000834
Marc Zyngierf005bd72016-08-01 10:54:15 +0100835 if (arch_timer_has_nonsecure_ppi()) {
Fu Weiee34f1e2017-01-18 21:25:27 +0800836 flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
837 enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
838 flags);
Marc Zyngierf005bd72016-08-01 10:54:15 +0100839 }
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000840
841 arch_counter_set_user_access();
Will Deacon46fd5c62016-06-27 17:30:13 +0100842 if (evtstrm_enable)
Will Deacon037f6372013-08-23 15:32:29 +0100843 arch_timer_configure_evtstream();
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000844
845 return 0;
846}
847
Fu Wei5d3dfa92017-03-22 00:31:13 +0800848/*
849 * For historical reasons, when probing with DT we use whichever (non-zero)
850 * rate was probed first, and don't verify that others match. If the first node
851 * probed has a clock-frequency property, this overrides the HW register.
852 */
853static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000854{
Stephen Boyd22006992013-07-18 16:59:32 -0700855 /* Who has more than one independent system counter? */
856 if (arch_timer_rate)
857 return;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000858
Fu Wei5d3dfa92017-03-22 00:31:13 +0800859 if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
860 arch_timer_rate = rate;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000861
Stephen Boyd22006992013-07-18 16:59:32 -0700862 /* Check the timer frequency. */
863 if (arch_timer_rate == 0)
Fu Weided24012017-01-18 21:25:25 +0800864 pr_warn("frequency not available\n");
Stephen Boyd22006992013-07-18 16:59:32 -0700865}
866
867static void arch_timer_banner(unsigned type)
868{
Fu Weided24012017-01-18 21:25:25 +0800869 pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
Fu Wei8a5c21d2017-01-18 21:25:26 +0800870 type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
871 type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
872 " and " : "",
873 type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
Fu Weided24012017-01-18 21:25:25 +0800874 (unsigned long)arch_timer_rate / 1000000,
875 (unsigned long)(arch_timer_rate / 10000) % 100,
Fu Wei8a5c21d2017-01-18 21:25:26 +0800876 type & ARCH_TIMER_TYPE_CP15 ?
Fu Weiee34f1e2017-01-18 21:25:27 +0800877 (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
Stephen Boyd22006992013-07-18 16:59:32 -0700878 "",
Fu Wei8a5c21d2017-01-18 21:25:26 +0800879 type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
880 type & ARCH_TIMER_TYPE_MEM ?
Stephen Boyd22006992013-07-18 16:59:32 -0700881 arch_timer_mem_use_virtual ? "virt" : "phys" :
882 "");
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000883}
884
/* Return the system counter frequency (Hz) latched at probe time. */
u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}
889
Stephen Boyd22006992013-07-18 16:59:32 -0700890static u64 arch_counter_get_cntvct_mem(void)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000891{
Stephen Boyd22006992013-07-18 16:59:32 -0700892 u32 vct_lo, vct_hi, tmp_hi;
893
894 do {
895 vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
896 vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
897 tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
898 } while (vct_hi != tmp_hi);
899
900 return ((u64) vct_hi << 32) | vct_lo;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000901}
902
Julien Grallb4d6ce92016-04-11 16:32:51 +0100903static struct arch_timer_kvm_info arch_timer_kvm_info;
904
905struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
906{
907 return &arch_timer_kvm_info;
908}
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000909
Stephen Boyd22006992013-07-18 16:59:32 -0700910static void __init arch_counter_register(unsigned type)
911{
912 u64 start_count;
913
914 /* Register the CP15 based counter if we have one */
Fu Wei8a5c21d2017-01-18 21:25:26 +0800915 if (type & ARCH_TIMER_TYPE_CP15) {
Fu Weiee34f1e2017-01-18 21:25:27 +0800916 if (IS_ENABLED(CONFIG_ARM64) ||
917 arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
Sonny Rao0b46b8a2014-11-23 23:02:44 -0800918 arch_timer_read_counter = arch_counter_get_cntvct;
919 else
920 arch_timer_read_counter = arch_counter_get_cntpct;
Scott Woodf6dc1572016-09-22 03:35:17 -0500921
Marc Zyngiera86bd132017-02-01 12:07:15 +0000922 clocksource_counter.archdata.vdso_direct = vdso_default;
Nathan Lynch423bd692014-09-29 01:50:06 +0200923 } else {
Stephen Boyd22006992013-07-18 16:59:32 -0700924 arch_timer_read_counter = arch_counter_get_cntvct_mem;
Nathan Lynch423bd692014-09-29 01:50:06 +0200925 }
926
Brian Norrisd8ec7592016-10-04 11:12:09 -0700927 if (!arch_counter_suspend_stop)
928 clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
Stephen Boyd22006992013-07-18 16:59:32 -0700929 start_count = arch_timer_read_counter();
930 clocksource_register_hz(&clocksource_counter, arch_timer_rate);
931 cyclecounter.mult = clocksource_counter.mult;
932 cyclecounter.shift = clocksource_counter.shift;
Julien Grallb4d6ce92016-04-11 16:32:51 +0100933 timecounter_init(&arch_timer_kvm_info.timecounter,
934 &cyclecounter, start_count);
Thierry Reding4a7d3e82013-10-15 15:31:51 +0200935
936 /* 56 bits minimum, so we assume worst case rollover */
937 sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
Stephen Boyd22006992013-07-18 16:59:32 -0700938}
939
Paul Gortmaker8c37bb32013-06-19 11:32:08 -0400940static void arch_timer_stop(struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000941{
Fu Weided24012017-01-18 21:25:25 +0800942 pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000943
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000944 disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
945 if (arch_timer_has_nonsecure_ppi())
Fu Weiee34f1e2017-01-18 21:25:27 +0800946 disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000947
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530948 clk->set_state_shutdown(clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000949}
950
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000951static int arch_timer_dying_cpu(unsigned int cpu)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000952{
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000953 struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000954
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000955 arch_timer_stop(clk);
956 return 0;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000957}
958
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100959#ifdef CONFIG_CPU_PM
Marc Zyngierbee67c52017-04-04 17:05:16 +0100960static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100961static int arch_timer_cpu_pm_notify(struct notifier_block *self,
962 unsigned long action, void *hcpu)
963{
964 if (action == CPU_PM_ENTER)
Marc Zyngierbee67c52017-04-04 17:05:16 +0100965 __this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100966 else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
Marc Zyngierbee67c52017-04-04 17:05:16 +0100967 arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100968 return NOTIFY_OK;
969}
970
971static struct notifier_block arch_timer_cpu_pm_notifier = {
972 .notifier_call = arch_timer_cpu_pm_notify,
973};
974
975static int __init arch_timer_cpu_pm_init(void)
976{
977 return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
978}
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000979
980static void __init arch_timer_cpu_pm_deinit(void)
981{
982 WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
983}
984
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100985#else
986static int __init arch_timer_cpu_pm_init(void)
987{
988 return 0;
989}
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000990
991static void __init arch_timer_cpu_pm_deinit(void)
992{
993}
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100994#endif
995
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000996static int __init arch_timer_register(void)
997{
998 int err;
999 int ppi;
1000
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001001 arch_timer_evt = alloc_percpu(struct clock_event_device);
1002 if (!arch_timer_evt) {
1003 err = -ENOMEM;
1004 goto out;
1005 }
1006
Marc Zyngierf81f03f2014-02-20 15:21:23 +00001007 ppi = arch_timer_ppi[arch_timer_uses_ppi];
1008 switch (arch_timer_uses_ppi) {
Fu Weiee34f1e2017-01-18 21:25:27 +08001009 case ARCH_TIMER_VIRT_PPI:
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001010 err = request_percpu_irq(ppi, arch_timer_handler_virt,
1011 "arch_timer", arch_timer_evt);
Marc Zyngierf81f03f2014-02-20 15:21:23 +00001012 break;
Fu Weiee34f1e2017-01-18 21:25:27 +08001013 case ARCH_TIMER_PHYS_SECURE_PPI:
1014 case ARCH_TIMER_PHYS_NONSECURE_PPI:
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001015 err = request_percpu_irq(ppi, arch_timer_handler_phys,
1016 "arch_timer", arch_timer_evt);
Fu Wei4502b6b2017-01-18 21:25:30 +08001017 if (!err && arch_timer_has_nonsecure_ppi()) {
Fu Weiee34f1e2017-01-18 21:25:27 +08001018 ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001019 err = request_percpu_irq(ppi, arch_timer_handler_phys,
1020 "arch_timer", arch_timer_evt);
1021 if (err)
Fu Weiee34f1e2017-01-18 21:25:27 +08001022 free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001023 arch_timer_evt);
1024 }
Marc Zyngierf81f03f2014-02-20 15:21:23 +00001025 break;
Fu Weiee34f1e2017-01-18 21:25:27 +08001026 case ARCH_TIMER_HYP_PPI:
Marc Zyngierf81f03f2014-02-20 15:21:23 +00001027 err = request_percpu_irq(ppi, arch_timer_handler_phys,
1028 "arch_timer", arch_timer_evt);
1029 break;
1030 default:
1031 BUG();
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001032 }
1033
1034 if (err) {
Fu Weided24012017-01-18 21:25:25 +08001035 pr_err("can't register interrupt %d (%d)\n", ppi, err);
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001036 goto out_free;
1037 }
1038
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +01001039 err = arch_timer_cpu_pm_init();
1040 if (err)
1041 goto out_unreg_notify;
1042
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001043
Richard Cochran7e86e8b2016-07-13 17:16:39 +00001044 /* Register and immediately configure the timer on the boot CPU */
1045 err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
Thomas Gleixner73c1b412016-12-21 20:19:54 +01001046 "clockevents/arm/arch_timer:starting",
Richard Cochran7e86e8b2016-07-13 17:16:39 +00001047 arch_timer_starting_cpu, arch_timer_dying_cpu);
1048 if (err)
1049 goto out_unreg_cpupm;
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001050 return 0;
1051
Richard Cochran7e86e8b2016-07-13 17:16:39 +00001052out_unreg_cpupm:
1053 arch_timer_cpu_pm_deinit();
1054
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +01001055out_unreg_notify:
Marc Zyngierf81f03f2014-02-20 15:21:23 +00001056 free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
1057 if (arch_timer_has_nonsecure_ppi())
Fu Weiee34f1e2017-01-18 21:25:27 +08001058 free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001059 arch_timer_evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001060
1061out_free:
1062 free_percpu(arch_timer_evt);
1063out:
1064 return err;
1065}
1066
Stephen Boyd22006992013-07-18 16:59:32 -07001067static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
1068{
1069 int ret;
1070 irq_handler_t func;
1071 struct arch_timer *t;
1072
1073 t = kzalloc(sizeof(*t), GFP_KERNEL);
1074 if (!t)
1075 return -ENOMEM;
1076
1077 t->base = base;
1078 t->evt.irq = irq;
Fu Wei8a5c21d2017-01-18 21:25:26 +08001079 __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);
Stephen Boyd22006992013-07-18 16:59:32 -07001080
1081 if (arch_timer_mem_use_virtual)
1082 func = arch_timer_handler_virt_mem;
1083 else
1084 func = arch_timer_handler_phys_mem;
1085
1086 ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
1087 if (ret) {
Fu Weided24012017-01-18 21:25:25 +08001088 pr_err("Failed to request mem timer irq\n");
Stephen Boyd22006992013-07-18 16:59:32 -07001089 kfree(t);
1090 }
1091
1092 return ret;
1093}
1094
/* DT compatibles for the per-CPU (system register) timer nodes. */
static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible   = "arm,armv7-timer",    },
	{ .compatible   = "arm,armv8-timer",    },
	{},
};

/* DT compatibles for the memory-mapped timer nodes. */
static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible   = "arm,armv7-timer-mem", },
	{},
};
1105
Fu Wei13bf6992017-03-22 00:31:14 +08001106static bool __init arch_timer_needs_of_probing(void)
Sudeep Hollac387f072014-09-29 01:50:05 +02001107{
1108 struct device_node *dn;
Laurent Pinchart566e6df2015-03-31 12:12:22 +02001109 bool needs_probing = false;
Fu Wei13bf6992017-03-22 00:31:14 +08001110 unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;
Sudeep Hollac387f072014-09-29 01:50:05 +02001111
Fu Wei13bf6992017-03-22 00:31:14 +08001112 /* We have two timers, and both device-tree nodes are probed. */
1113 if ((arch_timers_present & mask) == mask)
1114 return false;
1115
1116 /*
1117 * Only one type of timer is probed,
1118 * check if we have another type of timer node in device-tree.
1119 */
1120 if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
1121 dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
1122 else
1123 dn = of_find_matching_node(NULL, arch_timer_of_match);
1124
1125 if (dn && of_device_is_available(dn))
Laurent Pinchart566e6df2015-03-31 12:12:22 +02001126 needs_probing = true;
Fu Wei13bf6992017-03-22 00:31:14 +08001127
Sudeep Hollac387f072014-09-29 01:50:05 +02001128 of_node_put(dn);
1129
Laurent Pinchart566e6df2015-03-31 12:12:22 +02001130 return needs_probing;
Sudeep Hollac387f072014-09-29 01:50:05 +02001131}
1132
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001133static int __init arch_timer_common_init(void)
Stephen Boyd22006992013-07-18 16:59:32 -07001134{
Stephen Boyd22006992013-07-18 16:59:32 -07001135 arch_timer_banner(arch_timers_present);
1136 arch_counter_register(arch_timers_present);
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001137 return arch_timer_arch_init();
Stephen Boyd22006992013-07-18 16:59:32 -07001138}
1139
Fu Wei4502b6b2017-01-18 21:25:30 +08001140/**
1141 * arch_timer_select_ppi() - Select suitable PPI for the current system.
1142 *
1143 * If HYP mode is available, we know that the physical timer
1144 * has been configured to be accessible from PL1. Use it, so
1145 * that a guest can use the virtual timer instead.
1146 *
1147 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
1148 * accesses to CNTP_*_EL1 registers are silently redirected to
1149 * their CNTHP_*_EL2 counterparts, and use a different PPI
1150 * number.
1151 *
1152 * If no interrupt provided for virtual timer, we'll have to
1153 * stick to the physical timer. It'd better be accessible...
1154 * For arm64 we never use the secure interrupt.
1155 *
1156 * Return: a suitable PPI type for the current system.
1157 */
1158static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
1159{
1160 if (is_kernel_in_hyp_mode())
1161 return ARCH_TIMER_HYP_PPI;
1162
1163 if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
1164 return ARCH_TIMER_VIRT_PPI;
1165
1166 if (IS_ENABLED(CONFIG_ARM64))
1167 return ARCH_TIMER_PHYS_NONSECURE_PPI;
1168
1169 return ARCH_TIMER_PHYS_SECURE_PPI;
1170}
1171
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001172static int __init arch_timer_of_init(struct device_node *np)
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001173{
Fu Weica0e1b52017-03-22 00:31:15 +08001174 int i, ret;
Fu Wei5d3dfa92017-03-22 00:31:13 +08001175 u32 rate;
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001176
Fu Wei8a5c21d2017-01-18 21:25:26 +08001177 if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
Fu Weided24012017-01-18 21:25:25 +08001178 pr_warn("multiple nodes in dt, skipping\n");
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001179 return 0;
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001180 }
1181
Fu Wei8a5c21d2017-01-18 21:25:26 +08001182 arch_timers_present |= ARCH_TIMER_TYPE_CP15;
Fu Weiee34f1e2017-01-18 21:25:27 +08001183 for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001184 arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
1185
Fu Weica0e1b52017-03-22 00:31:15 +08001186 arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
1187
Fu Weic389d702017-04-01 01:51:00 +08001188 rate = arch_timer_get_cntfrq();
Fu Wei5d3dfa92017-03-22 00:31:13 +08001189 arch_timer_of_configure_rate(rate, np);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001190
1191 arch_timer_c3stop = !of_property_read_bool(np, "always-on");
1192
Marc Zyngier651bb2e2017-01-19 17:20:59 +00001193 /* Check for globally applicable workarounds */
1194 arch_timer_check_ool_workaround(ate_match_dt, np);
Scott Woodf6dc1572016-09-22 03:35:17 -05001195
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001196 /*
1197 * If we cannot rely on firmware initializing the timer registers then
1198 * we should use the physical timers instead.
1199 */
1200 if (IS_ENABLED(CONFIG_ARM) &&
1201 of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
Fu Weiee34f1e2017-01-18 21:25:27 +08001202 arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
Fu Wei4502b6b2017-01-18 21:25:30 +08001203 else
1204 arch_timer_uses_ppi = arch_timer_select_ppi();
1205
1206 if (!arch_timer_ppi[arch_timer_uses_ppi]) {
1207 pr_err("No interrupt available, giving up\n");
1208 return -EINVAL;
1209 }
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001210
Brian Norrisd8ec7592016-10-04 11:12:09 -07001211 /* On some systems, the counter stops ticking when in suspend. */
1212 arch_counter_suspend_stop = of_property_read_bool(np,
1213 "arm,no-tick-in-suspend");
1214
Fu Weica0e1b52017-03-22 00:31:15 +08001215 ret = arch_timer_register();
1216 if (ret)
1217 return ret;
1218
1219 if (arch_timer_needs_of_probing())
1220 return 0;
1221
1222 return arch_timer_common_init();
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001223}
Daniel Lezcano17273392017-05-26 16:56:11 +02001224TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
1225TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
Stephen Boyd22006992013-07-18 16:59:32 -07001226
Fu Weic389d702017-04-01 01:51:00 +08001227static u32 __init
1228arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
Stephen Boyd22006992013-07-18 16:59:32 -07001229{
Fu Weic389d702017-04-01 01:51:00 +08001230 void __iomem *base;
1231 u32 rate;
Stephen Boyd22006992013-07-18 16:59:32 -07001232
Fu Weic389d702017-04-01 01:51:00 +08001233 base = ioremap(frame->cntbase, frame->size);
1234 if (!base) {
1235 pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
1236 return 0;
1237 }
1238
Frank Rowand3db12002017-06-09 17:26:32 -07001239 rate = readl_relaxed(base + CNTFRQ);
Fu Weic389d702017-04-01 01:51:00 +08001240
Frank Rowand3db12002017-06-09 17:26:32 -07001241 iounmap(base);
Fu Weic389d702017-04-01 01:51:00 +08001242
1243 return rate;
1244}
1245
/*
 * Scan the frames of a memory-mapped timer and pick the best one:
 * the first frame that is virtual-capable, else the last frame that
 * is at least physical-capable. Sets arch_timer_mem_use_virtual when
 * a virtual-capable frame is chosen. Returns NULL if nothing usable.
 */
static struct arch_timer_mem_frame * __init
arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
{
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	void __iomem *cntctlbase;
	u32 cnttidr;
	int i;

	cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
	if (!cntctlbase) {
		pr_err("Can't map CNTCTLBase @ %pa\n",
			&timer_mem->cntctlbase);
		return NULL;
	}

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */
	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
			     CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;

		frame = &timer_mem->frame[i];
		if (!frame->valid)
			continue;

		/* Try enabling everything, and see what sticks */
		writel_relaxed(cntacr, cntctlbase + CNTACR(i));
		cntacr = readl_relaxed(cntctlbase + CNTACR(i));

		/* Virtual-capable frame: take it and stop looking. */
		if ((cnttidr & CNTTIDR_VIRT(i)) &&
		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
			best_frame = frame;
			arch_timer_mem_use_virtual = true;
			break;
		}

		/* Skip frames without physical timer/counter access. */
		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
			continue;

		/* Remember this phys-capable frame, keep scanning. */
		best_frame = frame;
	}

	iounmap(cntctlbase);

	if (!best_frame)
		pr_err("Unable to find a suitable frame in timer @ %pa\n",
			&timer_mem->cntctlbase);

	return best_frame;
}
1300
1301static int __init
1302arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
1303{
1304 void __iomem *base;
1305 int ret, irq = 0;
Stephen Boyd22006992013-07-18 16:59:32 -07001306
1307 if (arch_timer_mem_use_virtual)
Fu Weic389d702017-04-01 01:51:00 +08001308 irq = frame->virt_irq;
Stephen Boyd22006992013-07-18 16:59:32 -07001309 else
Fu Weic389d702017-04-01 01:51:00 +08001310 irq = frame->phys_irq;
Robin Murphye392d602016-02-01 12:00:48 +00001311
Stephen Boyd22006992013-07-18 16:59:32 -07001312 if (!irq) {
Fu Weided24012017-01-18 21:25:25 +08001313 pr_err("Frame missing %s irq.\n",
Thomas Gleixnercfb6d652013-08-21 14:59:23 +02001314 arch_timer_mem_use_virtual ? "virt" : "phys");
Fu Weic389d702017-04-01 01:51:00 +08001315 return -EINVAL;
1316 }
1317
1318 if (!request_mem_region(frame->cntbase, frame->size,
1319 "arch_mem_timer"))
1320 return -EBUSY;
1321
1322 base = ioremap(frame->cntbase, frame->size);
1323 if (!base) {
1324 pr_err("Can't map frame's registers\n");
1325 return -ENXIO;
1326 }
1327
1328 ret = arch_timer_mem_register(base, irq);
1329 if (ret) {
1330 iounmap(base);
1331 return ret;
1332 }
1333
1334 arch_counter_base = base;
1335 arch_timers_present |= ARCH_TIMER_TYPE_MEM;
1336
1337 return 0;
1338}
1339
/*
 * DT probe for an "arm,armv7-timer-mem" node: parse the CNTCTLBase
 * region and all child frame nodes, pick the best frame, determine the
 * rate and register the frame. Runs the common init unless the CP15
 * timer node is still to probe.
 */
static int __init arch_timer_mem_of_init(struct device_node *np)
{
	struct arch_timer_mem *timer_mem;
	struct arch_timer_mem_frame *frame;
	struct device_node *frame_node;
	struct resource res;
	int ret = -EINVAL;
	u32 rate;

	timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
	if (!timer_mem)
		return -ENOMEM;

	if (of_address_to_resource(np, 0, &res))
		goto out;
	timer_mem->cntctlbase = res.start;
	timer_mem->size = resource_size(&res);

	/* Collect base, size and IRQs for every child frame node. */
	for_each_available_child_of_node(np, frame_node) {
		u32 n;
		struct arch_timer_mem_frame *frame;

		if (of_property_read_u32(frame_node, "frame-number", &n)) {
			pr_err(FW_BUG "Missing frame-number.\n");
			of_node_put(frame_node);
			goto out;
		}
		if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
			pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
			       ARCH_TIMER_MEM_MAX_FRAMES - 1);
			of_node_put(frame_node);
			goto out;
		}
		frame = &timer_mem->frame[n];

		/* Two nodes claiming the same frame is firmware breakage. */
		if (frame->valid) {
			pr_err(FW_BUG "Duplicated frame-number.\n");
			of_node_put(frame_node);
			goto out;
		}

		if (of_address_to_resource(frame_node, 0, &res)) {
			of_node_put(frame_node);
			goto out;
		}
		frame->cntbase = res.start;
		frame->size = resource_size(&res);

		frame->virt_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_VIRT_SPI);
		frame->phys_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_PHYS_SPI);

		frame->valid = true;
	}

	frame = arch_timer_mem_find_best_frame(timer_mem);
	if (!frame) {
		ret = -EINVAL;
		goto out;
	}

	rate = arch_timer_mem_frame_get_cntfrq(frame);
	arch_timer_of_configure_rate(rate, np);

	ret = arch_timer_mem_frame_register(frame);
	if (!ret && !arch_timer_needs_of_probing())
		ret = arch_timer_common_init();
out:
	/* Parsed data has been consumed; the scratch struct can go. */
	kfree(timer_mem);
	return ret;
}
Daniel Lezcano17273392017-05-26 16:56:11 +02001412TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
Fu Weic389d702017-04-01 01:51:00 +08001413 arch_timer_mem_of_init);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001414
Fu Weif79d2092017-04-01 01:51:02 +08001415#ifdef CONFIG_ACPI_GTDT
Fu Weic2743a32017-04-01 01:51:04 +08001416static int __init
1417arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
1418{
1419 struct arch_timer_mem_frame *frame;
1420 u32 rate;
1421 int i;
1422
1423 for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
1424 frame = &timer_mem->frame[i];
1425
1426 if (!frame->valid)
1427 continue;
1428
1429 rate = arch_timer_mem_frame_get_cntfrq(frame);
1430 if (rate == arch_timer_rate)
1431 continue;
1432
1433 pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
1434 &frame->cntbase,
1435 (unsigned long)rate, (unsigned long)arch_timer_rate);
1436
1437 return -EINVAL;
1438 }
1439
1440 return 0;
1441}
1442
/*
 * Probe the GTDT-described memory-mapped timers: verify their CNTFRQ
 * matches the CPU timer, then register the first timer that offers a
 * usable frame. Returns 0 if nothing needed doing or on success.
 */
static int __init arch_timer_mem_acpi_init(int platform_timer_count)
{
	struct arch_timer_mem *timers, *timer;
	struct arch_timer_mem_frame *frame;
	int timer_count, i, ret = 0;

	timers = kcalloc(platform_timer_count, sizeof(*timers),
			 GFP_KERNEL);
	if (!timers)
		return -ENOMEM;

	ret = acpi_arch_timer_mem_init(timers, &timer_count);
	if (ret || !timer_count)
		goto out;

	/* Reject all MMIO timers if any frame disagrees on CNTFRQ. */
	for (i = 0; i < timer_count; i++) {
		ret = arch_timer_mem_verify_cntfrq(&timers[i]);
		if (ret) {
			pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
			goto out;
		}
	}

	/*
	 * While unlikely, it's theoretically possible that none of the frames
	 * in a timer expose the combination of feature we want.
	 */
	for (i = 0; i < timer_count; i++) {
		timer = &timers[i];

		frame = arch_timer_mem_find_best_frame(timer);
		if (frame)
			break;
	}

	/* frame is NULL here iff no timer had a usable frame. */
	if (frame)
		ret = arch_timer_mem_frame_register(frame);
out:
	kfree(timers);
	return ret;
}
1484
/* Initialize per-processor generic timer and memory-mapped timer(if present) */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	int ret, platform_timer_count;

	/* The DT probe path may already have claimed the CP15 timer. */
	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("already initialized, skipping\n");
		return -EINVAL;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;

	ret = acpi_gtdt_init(table, &platform_timer_count);
	if (ret) {
		pr_err("Failed to init GTDT table.\n");
		return ret;
	}

	/* Map the PPIs described by the GTDT table. */
	arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);

	arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);

	arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);

	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];

	/*
	 * When probing via ACPI, we have no mechanism to override the sysreg
	 * CNTFRQ value. This *must* be correct.
	 */
	arch_timer_rate = arch_timer_get_cntfrq();
	if (!arch_timer_rate) {
		pr_err(FW_BUG "frequency not available.\n");
		return -EINVAL;
	}

	arch_timer_uses_ppi = arch_timer_select_ppi();
	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

	/* Always-on capability */
	arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);

	ret = arch_timer_register();
	if (ret)
		return ret;

	/* The MMIO timers are optional; failure there is not fatal. */
	if (platform_timer_count &&
	    arch_timer_mem_acpi_init(platform_timer_count))
		pr_err("Failed to initialize memory-mapped timer.\n");

	return arch_timer_common_init();
}
Daniel Lezcano77d62f52017-05-26 17:42:25 +02001546TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001547#endif