blob: 94e355bb0d3871e050feb2ccd69fc8e4a7185893 [file] [log] [blame]
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001/*
2 * linux/drivers/clocksource/arm_arch_timer.c
3 *
4 * Copyright (C) 2011 ARM Ltd.
5 * All Rights Reserved
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
Marc Zyngierf005bd72016-08-01 10:54:15 +010011
12#define pr_fmt(fmt) "arm_arch_timer: " fmt
13
Mark Rutland8a4da6e2012-11-12 14:33:44 +000014#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/device.h>
17#include <linux/smp.h>
18#include <linux/cpu.h>
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +010019#include <linux/cpu_pm.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000020#include <linux/clockchips.h>
Richard Cochran7c8f1e72015-01-06 14:26:13 +010021#include <linux/clocksource.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000022#include <linux/interrupt.h>
23#include <linux/of_irq.h>
Stephen Boyd22006992013-07-18 16:59:32 -070024#include <linux/of_address.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000025#include <linux/io.h>
Stephen Boyd22006992013-07-18 16:59:32 -070026#include <linux/slab.h>
Ingo Molnare6017572017-02-01 16:36:40 +010027#include <linux/sched/clock.h>
Stephen Boyd65cd4f62013-07-18 16:21:18 -070028#include <linux/sched_clock.h>
Hanjun Guob09ca1e2015-03-24 14:02:50 +000029#include <linux/acpi.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000030
31#include <asm/arch_timer.h>
Marc Zyngier82668912013-01-10 11:13:07 +000032#include <asm/virt.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000033
34#include <clocksource/arm_arch_timer.h>
35
Fu Weided24012017-01-18 21:25:25 +080036#undef pr_fmt
37#define pr_fmt(fmt) "arch_timer: " fmt
38
Stephen Boyd22006992013-07-18 16:59:32 -070039#define CNTTIDR 0x08
40#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
41
Robin Murphye392d602016-02-01 12:00:48 +000042#define CNTACR(n) (0x40 + ((n) * 4))
43#define CNTACR_RPCT BIT(0)
44#define CNTACR_RVCT BIT(1)
45#define CNTACR_RFRQ BIT(2)
46#define CNTACR_RVOFF BIT(3)
47#define CNTACR_RWVT BIT(4)
48#define CNTACR_RWPT BIT(5)
49
Stephen Boyd22006992013-07-18 16:59:32 -070050#define CNTVCT_LO 0x08
51#define CNTVCT_HI 0x0c
52#define CNTFRQ 0x10
53#define CNTP_TVAL 0x28
54#define CNTP_CTL 0x2c
55#define CNTV_TVAL 0x38
56#define CNTV_CTL 0x3c
57
Fu Wei8a5c21d2017-01-18 21:25:26 +080058#define ARCH_TIMER_TYPE_CP15 BIT(0)
59#define ARCH_TIMER_TYPE_MEM BIT(1)
Stephen Boyd22006992013-07-18 16:59:32 -070060static unsigned arch_timers_present __initdata;
61
62static void __iomem *arch_counter_base;
63
64struct arch_timer {
65 void __iomem *base;
66 struct clock_event_device evt;
67};
68
69#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
70
Mark Rutland8a4da6e2012-11-12 14:33:44 +000071static u32 arch_timer_rate;
72
Fu Weiee34f1e2017-01-18 21:25:27 +080073enum arch_timer_ppi_nr {
74 ARCH_TIMER_PHYS_SECURE_PPI,
75 ARCH_TIMER_PHYS_NONSECURE_PPI,
76 ARCH_TIMER_VIRT_PPI,
77 ARCH_TIMER_HYP_PPI,
78 ARCH_TIMER_MAX_TIMER_PPI
Mark Rutland8a4da6e2012-11-12 14:33:44 +000079};
80
Fu Weiee34f1e2017-01-18 21:25:27 +080081static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];
Mark Rutland8a4da6e2012-11-12 14:33:44 +000082
83static struct clock_event_device __percpu *arch_timer_evt;
84
Fu Weiee34f1e2017-01-18 21:25:27 +080085static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
Lorenzo Pieralisi82a561942014-04-08 10:04:32 +010086static bool arch_timer_c3stop;
Stephen Boyd22006992013-07-18 16:59:32 -070087static bool arch_timer_mem_use_virtual;
Brian Norrisd8ec7592016-10-04 11:12:09 -070088static bool arch_counter_suspend_stop;
Marc Zyngiera86bd132017-02-01 12:07:15 +000089static bool vdso_default = true;
Mark Rutland8a4da6e2012-11-12 14:33:44 +000090
Will Deacon46fd5c62016-06-27 17:30:13 +010091static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
92
/*
 * Early command-line override for the event stream:
 * "clocksource.arm_arch_timer.evtstrm=<bool>".
 */
static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
98
Mark Rutland8a4da6e2012-11-12 14:33:44 +000099/*
100 * Architected system timer support.
101 */
102
Marc Zyngierf4e00a12017-01-20 18:28:32 +0000103static __always_inline
104void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
105 struct clock_event_device *clk)
106{
107 if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
108 struct arch_timer *timer = to_arch_timer(clk);
109 switch (reg) {
110 case ARCH_TIMER_REG_CTRL:
111 writel_relaxed(val, timer->base + CNTP_CTL);
112 break;
113 case ARCH_TIMER_REG_TVAL:
114 writel_relaxed(val, timer->base + CNTP_TVAL);
115 break;
116 }
117 } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
118 struct arch_timer *timer = to_arch_timer(clk);
119 switch (reg) {
120 case ARCH_TIMER_REG_CTRL:
121 writel_relaxed(val, timer->base + CNTV_CTL);
122 break;
123 case ARCH_TIMER_REG_TVAL:
124 writel_relaxed(val, timer->base + CNTV_TVAL);
125 break;
126 }
127 } else {
128 arch_timer_reg_write_cp15(access, reg, val);
129 }
130}
131
/*
 * Read timer register @reg for the given access type; mirror image of
 * arch_timer_reg_write() above (MMIO frame for MEM accesses, CP15
 * sysreg otherwise).
 */
static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}
164
Marc Zyngier992dd162017-02-01 11:53:46 +0000165/*
166 * Default to cp15 based access because arm64 uses this function for
167 * sched_clock() before DT is probed and the cp15 method is guaranteed
168 * to exist on arm64. arm doesn't use this before DT is probed so even
169 * if we don't have the cp15 accessors we won't have a problem.
170 */
171u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
172
/* clocksource read hook: defer to whichever counter accessor is installed. */
static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

/* cyclecounter read hook: same underlying counter as the clocksource. */
static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}
182
183static struct clocksource clocksource_counter = {
184 .name = "arch_sys_counter",
185 .rating = 400,
186 .read = arch_counter_read,
187 .mask = CLOCKSOURCE_MASK(56),
188 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
189};
190
191static struct cyclecounter cyclecounter __ro_after_init = {
192 .read = arch_counter_read_cc,
193 .mask = CLOCKSOURCE_MASK(56),
194};
195
Marc Zyngier5a38bca2017-02-21 14:37:30 +0000196struct ate_acpi_oem_info {
197 char oem_id[ACPI_OEM_ID_SIZE + 1];
198 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
199 u32 oem_revision;
200};
201
Scott Woodf6dc1572016-09-22 03:35:17 -0500202#ifdef CONFIG_FSL_ERRATUM_A008585
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000203/*
204 * The number of retries is an arbitrary value well beyond the highest number
205 * of iterations the loop has been observed to take.
206 */
207#define __fsl_a008585_read_reg(reg) ({ \
208 u64 _old, _new; \
209 int _retries = 200; \
210 \
211 do { \
212 _old = read_sysreg(reg); \
213 _new = read_sysreg(reg); \
214 _retries--; \
215 } while (unlikely(_old != _new) && _retries); \
216 \
217 WARN_ON_ONCE(!_retries); \
218 _new; \
219})
Scott Woodf6dc1572016-09-22 03:35:17 -0500220
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000221static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
Scott Woodf6dc1572016-09-22 03:35:17 -0500222{
223 return __fsl_a008585_read_reg(cntp_tval_el0);
224}
225
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000226static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
Scott Woodf6dc1572016-09-22 03:35:17 -0500227{
228 return __fsl_a008585_read_reg(cntv_tval_el0);
229}
230
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000231static u64 notrace fsl_a008585_read_cntvct_el0(void)
Scott Woodf6dc1572016-09-22 03:35:17 -0500232{
233 return __fsl_a008585_read_reg(cntvct_el0);
234}
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000235#endif
236
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000237#ifdef CONFIG_HISILICON_ERRATUM_161010101
238/*
239 * Verify whether the value of the second read is larger than the first by
240 * less than 32 is the only way to confirm the value is correct, so clear the
241 * lower 5 bits to check whether the difference is greater than 32 or not.
242 * Theoretically the erratum should not occur more than twice in succession
243 * when reading the system counter, but it is possible that some interrupts
244 * may lead to more than twice read errors, triggering the warning, so setting
245 * the number of retries far beyond the number of iterations the loop has been
246 * observed to take.
247 */
248#define __hisi_161010101_read_reg(reg) ({ \
249 u64 _old, _new; \
250 int _retries = 50; \
251 \
252 do { \
253 _old = read_sysreg(reg); \
254 _new = read_sysreg(reg); \
255 _retries--; \
256 } while (unlikely((_new - _old) >> 5) && _retries); \
257 \
258 WARN_ON_ONCE(!_retries); \
259 _new; \
260})
261
/*
 * Stable register readers for HiSilicon erratum 161010101: re-read until
 * two consecutive reads differ by less than 32 (see
 * __hisi_161010101_read_reg() above).
 */
static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}
Marc Zyngierd003d022017-02-21 15:04:27 +0000276
277static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
278 /*
279 * Note that trailing spaces are required to properly match
280 * the OEM table information.
281 */
282 {
283 .oem_id = "HISI ",
284 .oem_table_id = "HIP05 ",
285 .oem_revision = 0,
286 },
287 {
288 .oem_id = "HISI ",
289 .oem_table_id = "HIP06 ",
290 .oem_revision = 0,
291 },
292 {
293 .oem_id = "HISI ",
294 .oem_table_id = "HIP07 ",
295 .oem_revision = 0,
296 },
297 { /* Sentinel indicating the end of the OEM array */ },
298};
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000299#endif
300
Marc Zyngierfa8d8152017-01-27 12:52:31 +0000301#ifdef CONFIG_ARM64_ERRATUM_858921
/*
 * ARM erratum 858921 workaround: read the counter twice; if bit 32
 * differs between the two reads (the upper word changed in between),
 * trust the first value, otherwise return the second.
 */
static u64 notrace arm64_858921_read_cntvct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntvct_el0);
	new = read_sysreg(cntvct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}
310#endif
311
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000312#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000313DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *,
314 timer_unstable_counter_workaround);
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000315EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
316
317DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
318EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
319
Marc Zyngier83280892017-01-27 10:27:09 +0000320static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
321 struct clock_event_device *clk)
322{
323 unsigned long ctrl;
324 u64 cval = evt + arch_counter_get_cntvct();
325
326 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
327 ctrl |= ARCH_TIMER_CTRL_ENABLE;
328 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
329
330 if (access == ARCH_TIMER_PHYS_ACCESS)
331 write_sysreg(cval, cntp_cval_el0);
332 else
333 write_sysreg(cval, cntv_cval_el0);
334
335 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
336}
337
/* clockevents set_next_event hooks for the erratum (CVAL-based) path. */
static int erratum_set_next_event_tval_virt(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int erratum_set_next_event_tval_phys(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}
351
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000352static const struct arch_timer_erratum_workaround ool_workarounds[] = {
353#ifdef CONFIG_FSL_ERRATUM_A008585
354 {
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000355 .match_type = ate_match_dt,
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000356 .id = "fsl,erratum-a008585",
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000357 .desc = "Freescale erratum a005858",
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000358 .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
359 .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
360 .read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000361 .set_next_event_phys = erratum_set_next_event_tval_phys,
362 .set_next_event_virt = erratum_set_next_event_tval_virt,
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000363 },
364#endif
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000365#ifdef CONFIG_HISILICON_ERRATUM_161010101
366 {
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000367 .match_type = ate_match_dt,
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000368 .id = "hisilicon,erratum-161010101",
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000369 .desc = "HiSilicon erratum 161010101",
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000370 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
371 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
372 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000373 .set_next_event_phys = erratum_set_next_event_tval_phys,
374 .set_next_event_virt = erratum_set_next_event_tval_virt,
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000375 },
Marc Zyngierd003d022017-02-21 15:04:27 +0000376 {
377 .match_type = ate_match_acpi_oem_info,
378 .id = hisi_161010101_oem_info,
379 .desc = "HiSilicon erratum 161010101",
380 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
381 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
382 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
383 .set_next_event_phys = erratum_set_next_event_tval_phys,
384 .set_next_event_virt = erratum_set_next_event_tval_virt,
385 },
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000386#endif
Marc Zyngierfa8d8152017-01-27 12:52:31 +0000387#ifdef CONFIG_ARM64_ERRATUM_858921
388 {
389 .match_type = ate_match_local_cap_id,
390 .id = (void *)ARM64_WORKAROUND_858921,
391 .desc = "ARM erratum 858921",
392 .read_cntvct_el0 = arm64_858921_read_cntvct_el0,
393 },
394#endif
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000395};
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000396
397typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
398 const void *);
399
/*
 * DT match: the erratum applies if the timer node carries the property
 * named by wa->id (e.g. "fsl,erratum-a008585").
 */
static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
				 const void *arg)
{
	const struct device_node *np = arg;

	return of_property_read_bool(np, wa->id);
}
408
Marc Zyngier00640302017-03-20 16:47:59 +0000409static
410bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
411 const void *arg)
412{
413 return this_cpu_has_cap((uintptr_t)wa->id);
414}
415
Marc Zyngier5a38bca2017-02-21 14:37:30 +0000416
417static
418bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
419 const void *arg)
420{
421 static const struct ate_acpi_oem_info empty_oem_info = {};
422 const struct ate_acpi_oem_info *info = wa->id;
423 const struct acpi_table_header *table = arg;
424
425 /* Iterate over the ACPI OEM info array, looking for a match */
426 while (memcmp(info, &empty_oem_info, sizeof(*info))) {
427 if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
428 !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
429 info->oem_revision == table->oem_revision)
430 return true;
431
432 info++;
433 }
434
435 return false;
436}
437
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000438static const struct arch_timer_erratum_workaround *
439arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
440 ate_match_fn_t match_fn,
441 void *arg)
442{
443 int i;
444
445 for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
446 if (ool_workarounds[i].match_type != type)
447 continue;
448
449 if (match_fn(&ool_workarounds[i], arg))
450 return &ool_workarounds[i];
451 }
452
453 return NULL;
454}
455
/*
 * Install @wa as the active workaround: on this CPU only when @local is
 * set, otherwise on every possible CPU.  Also flips the static key that
 * routes reads through the out-of-line accessors, and kills the vdso
 * counter fastpath when the erratum needs a special cntvct reader.
 */
static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
				  bool local)
{
	int i;

	if (local) {
		__this_cpu_write(timer_unstable_counter_workaround, wa);
	} else {
		for_each_possible_cpu(i)
			per_cpu(timer_unstable_counter_workaround, i) = wa;
	}

	static_branch_enable(&arch_timer_read_ool_enabled);

	/*
	 * Don't use the vdso fastpath if errata require using the
	 * out-of-line counter accessor. We may change our mind pretty
	 * late in the game (with a per-CPU erratum, for example), so
	 * change both the default value and the vdso itself.
	 */
	if (wa->read_cntvct_el0) {
		clocksource_counter.archdata.vdso_direct = false;
		vdso_default = false;
	}
}
482
483static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
484 void *arg)
485{
486 const struct arch_timer_erratum_workaround *wa;
487 ate_match_fn_t match_fn = NULL;
Marc Zyngier00640302017-03-20 16:47:59 +0000488 bool local = false;
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000489
490 switch (type) {
491 case ate_match_dt:
492 match_fn = arch_timer_check_dt_erratum;
493 break;
Marc Zyngier00640302017-03-20 16:47:59 +0000494 case ate_match_local_cap_id:
495 match_fn = arch_timer_check_local_cap_erratum;
496 local = true;
497 break;
Marc Zyngier5a38bca2017-02-21 14:37:30 +0000498 case ate_match_acpi_oem_info:
499 match_fn = arch_timer_check_acpi_oem_erratum;
500 break;
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000501 default:
502 WARN_ON(1);
503 return;
504 }
505
506 wa = arch_timer_iterate_errata(type, match_fn, arg);
507 if (!wa)
508 return;
509
Marc Zyngier00640302017-03-20 16:47:59 +0000510 if (needs_unstable_timer_counter_workaround()) {
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000511 const struct arch_timer_erratum_workaround *__wa;
512 __wa = __this_cpu_read(timer_unstable_counter_workaround);
513 if (__wa && wa != __wa)
Marc Zyngier00640302017-03-20 16:47:59 +0000514 pr_warn("Can't enable workaround for %s (clashes with %s\n)",
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000515 wa->desc, __wa->desc);
516
517 if (__wa)
518 return;
Marc Zyngier00640302017-03-20 16:47:59 +0000519 }
520
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000521 arch_timer_enable_workaround(wa, local);
Marc Zyngier00640302017-03-20 16:47:59 +0000522 pr_info("Enabling %s workaround for %s\n",
523 local ? "local" : "global", wa->desc);
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000524}
525
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000526#define erratum_handler(fn, r, ...) \
527({ \
528 bool __val; \
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000529 if (needs_unstable_timer_counter_workaround()) { \
530 const struct arch_timer_erratum_workaround *__wa; \
531 __wa = __this_cpu_read(timer_unstable_counter_workaround); \
532 if (__wa && __wa->fn) { \
533 r = __wa->fn(__VA_ARGS__); \
534 __val = true; \
535 } else { \
536 __val = false; \
537 } \
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000538 } else { \
539 __val = false; \
540 } \
541 __val; \
542})
543
Marc Zyngiera86bd132017-02-01 12:07:15 +0000544static bool arch_timer_this_cpu_has_cntvct_wa(void)
545{
546 const struct arch_timer_erratum_workaround *wa;
547
548 wa = __this_cpu_read(timer_unstable_counter_workaround);
549 return wa && wa->read_cntvct_el0;
550}
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000551#else
552#define arch_timer_check_ool_workaround(t,a) do { } while(0)
Marc Zyngier83280892017-01-27 10:27:09 +0000553#define erratum_set_next_event_tval_virt(...) ({BUG(); 0;})
554#define erratum_set_next_event_tval_phys(...) ({BUG(); 0;})
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000555#define erratum_handler(fn, r, ...) ({false;})
Marc Zyngiera86bd132017-02-01 12:07:15 +0000556#define arch_timer_this_cpu_has_cntvct_wa() ({false;})
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000557#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
Scott Woodf6dc1572016-09-22 03:35:17 -0500558
Stephen Boyde09f3cc2013-07-18 16:59:28 -0700559static __always_inline irqreturn_t timer_handler(const int access,
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000560 struct clock_event_device *evt)
561{
562 unsigned long ctrl;
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200563
Stephen Boyd60faddf2013-07-18 16:59:31 -0700564 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000565 if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
566 ctrl |= ARCH_TIMER_CTRL_IT_MASK;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700567 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000568 evt->event_handler(evt);
569 return IRQ_HANDLED;
570 }
571
572 return IRQ_NONE;
573}
574
/* Per-access-type irq entry points, all funnelling into timer_handler(). */
static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}
602
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530603static __always_inline int timer_shutdown(const int access,
604 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000605{
606 unsigned long ctrl;
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530607
608 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
609 ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
610 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
611
612 return 0;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000613}
614
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530615static int arch_timer_shutdown_virt(struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000616{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530617 return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000618}
619
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530620static int arch_timer_shutdown_phys(struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000621{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530622 return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000623}
624
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530625static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
Stephen Boyd22006992013-07-18 16:59:32 -0700626{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530627 return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
Stephen Boyd22006992013-07-18 16:59:32 -0700628}
629
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530630static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
Stephen Boyd22006992013-07-18 16:59:32 -0700631{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530632 return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
Stephen Boyd22006992013-07-18 16:59:32 -0700633}
634
Stephen Boyd60faddf2013-07-18 16:59:31 -0700635static __always_inline void set_next_event(const int access, unsigned long evt,
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200636 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000637{
638 unsigned long ctrl;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700639 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000640 ctrl |= ARCH_TIMER_CTRL_ENABLE;
641 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700642 arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
643 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000644}
645
/*
 * clockevents set_next_event hooks.  The CP15 variants first give any
 * active erratum workaround a chance to take over (erratum_handler()
 * returns true and sets @ret when it did); the MMIO variants have no
 * erratum path.
 */
static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_virt, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_phys, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}
683
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200684static void __arch_timer_setup(unsigned type,
685 struct clock_event_device *clk)
Stephen Boyd22006992013-07-18 16:59:32 -0700686{
687 clk->features = CLOCK_EVT_FEAT_ONESHOT;
688
Fu Wei8a5c21d2017-01-18 21:25:26 +0800689 if (type == ARCH_TIMER_TYPE_CP15) {
Lorenzo Pieralisi82a561942014-04-08 10:04:32 +0100690 if (arch_timer_c3stop)
691 clk->features |= CLOCK_EVT_FEAT_C3STOP;
Stephen Boyd22006992013-07-18 16:59:32 -0700692 clk->name = "arch_sys_timer";
693 clk->rating = 450;
694 clk->cpumask = cpumask_of(smp_processor_id());
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000695 clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
696 switch (arch_timer_uses_ppi) {
Fu Weiee34f1e2017-01-18 21:25:27 +0800697 case ARCH_TIMER_VIRT_PPI:
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530698 clk->set_state_shutdown = arch_timer_shutdown_virt;
Viresh Kumarcf8c5002015-12-23 16:59:12 +0530699 clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
Stephen Boyd22006992013-07-18 16:59:32 -0700700 clk->set_next_event = arch_timer_set_next_event_virt;
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000701 break;
Fu Weiee34f1e2017-01-18 21:25:27 +0800702 case ARCH_TIMER_PHYS_SECURE_PPI:
703 case ARCH_TIMER_PHYS_NONSECURE_PPI:
704 case ARCH_TIMER_HYP_PPI:
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530705 clk->set_state_shutdown = arch_timer_shutdown_phys;
Viresh Kumarcf8c5002015-12-23 16:59:12 +0530706 clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
Stephen Boyd22006992013-07-18 16:59:32 -0700707 clk->set_next_event = arch_timer_set_next_event_phys;
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000708 break;
709 default:
710 BUG();
Stephen Boyd22006992013-07-18 16:59:32 -0700711 }
Scott Woodf6dc1572016-09-22 03:35:17 -0500712
Marc Zyngier00640302017-03-20 16:47:59 +0000713 arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
Stephen Boyd22006992013-07-18 16:59:32 -0700714 } else {
Stephen Boyd7b52ad22014-01-06 14:56:17 -0800715 clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
Stephen Boyd22006992013-07-18 16:59:32 -0700716 clk->name = "arch_mem_timer";
717 clk->rating = 400;
718 clk->cpumask = cpu_all_mask;
719 if (arch_timer_mem_use_virtual) {
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530720 clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
Viresh Kumarcf8c5002015-12-23 16:59:12 +0530721 clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
Stephen Boyd22006992013-07-18 16:59:32 -0700722 clk->set_next_event =
723 arch_timer_set_next_event_virt_mem;
724 } else {
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530725 clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
Viresh Kumarcf8c5002015-12-23 16:59:12 +0530726 clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
Stephen Boyd22006992013-07-18 16:59:32 -0700727 clk->set_next_event =
728 arch_timer_set_next_event_phys_mem;
729 }
730 }
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000731
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530732 clk->set_state_shutdown(clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000733
Stephen Boyd22006992013-07-18 16:59:32 -0700734 clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
735}
736
Nathan Lynche1ce5c72014-09-29 01:50:06 +0200737static void arch_timer_evtstrm_enable(int divider)
738{
739 u32 cntkctl = arch_timer_get_cntkctl();
740
741 cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
742 /* Set the divider and enable virtual event stream */
743 cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
744 | ARCH_TIMER_VIRT_EVT_EN;
745 arch_timer_set_cntkctl(cntkctl);
746 elf_hwcap |= HWCAP_EVTSTRM;
747#ifdef CONFIG_COMPAT
748 compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
749#endif
750}
751
Will Deacon037f6372013-08-23 15:32:29 +0100752static void arch_timer_configure_evtstream(void)
753{
754 int evt_stream_div, pos;
755
756 /* Find the closest power of two to the divisor */
757 evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
758 pos = fls(evt_stream_div);
759 if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
760 pos--;
761 /* enable event stream */
762 arch_timer_evtstrm_enable(min(pos, 15));
763}
764
Nathan Lynch8b8dde02014-09-29 01:50:06 +0200765static void arch_counter_set_user_access(void)
766{
767 u32 cntkctl = arch_timer_get_cntkctl();
768
Marc Zyngiera86bd132017-02-01 12:07:15 +0000769 /* Disable user access to the timers and both counters */
Nathan Lynch8b8dde02014-09-29 01:50:06 +0200770 /* Also disable virtual event stream */
771 cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
772 | ARCH_TIMER_USR_VT_ACCESS_EN
Marc Zyngiera86bd132017-02-01 12:07:15 +0000773 | ARCH_TIMER_USR_VCT_ACCESS_EN
Nathan Lynch8b8dde02014-09-29 01:50:06 +0200774 | ARCH_TIMER_VIRT_EVT_EN
775 | ARCH_TIMER_USR_PCT_ACCESS_EN);
776
Marc Zyngiera86bd132017-02-01 12:07:15 +0000777 /*
778 * Enable user access to the virtual counter if it doesn't
779 * need to be workaround. The vdso may have been already
780 * disabled though.
781 */
782 if (arch_timer_this_cpu_has_cntvct_wa())
783 pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
784 else
785 cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
Nathan Lynch8b8dde02014-09-29 01:50:06 +0200786
787 arch_timer_set_cntkctl(cntkctl);
788}
789
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000790static bool arch_timer_has_nonsecure_ppi(void)
791{
Fu Weiee34f1e2017-01-18 21:25:27 +0800792 return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
793 arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000794}
795
Marc Zyngierf005bd72016-08-01 10:54:15 +0100796static u32 check_ppi_trigger(int irq)
797{
798 u32 flags = irq_get_trigger_type(irq);
799
800 if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
801 pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
802 pr_warn("WARNING: Please fix your firmware\n");
803 flags = IRQF_TRIGGER_LOW;
804 }
805
806 return flags;
807}
808
/*
 * CPU hotplug "starting" callback: set up the architected timer on the
 * CPU that is coming online — configure its clock_event_device, enable
 * the per-CPU PPI(s) with the firmware-described (validated) trigger,
 * apply the userspace-access policy, and optionally start the event
 * stream. Runs on the incoming CPU. Always returns 0.
 */
static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	/* Secure physical timer users may also own a non-secure PPI */
	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				  flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}
831
/*
 * Determine the system counter frequency and cache it in
 * arch_timer_rate. A previously detected rate is kept (the counter is
 * system-wide). Preference order: DT "clock-frequency" (DT boot only),
 * then the memory-mapped CNTFRQ register if @cntbase is given,
 * otherwise the CP15 CNTFRQ register.
 */
static void
arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	/*
	 * Try to determine the frequency from the device tree or CNTFRQ,
	 * if ACPI is enabled, get the frequency from CNTFRQ ONLY.
	 */
	if (!acpi_disabled ||
	    of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
		if (cntbase)
			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
		else
			arch_timer_rate = arch_timer_get_cntfrq();
	}

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("frequency not available\n");
}
855
856static void arch_timer_banner(unsigned type)
857{
Fu Weided24012017-01-18 21:25:25 +0800858 pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
Fu Wei8a5c21d2017-01-18 21:25:26 +0800859 type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
860 type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
861 " and " : "",
862 type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
Fu Weided24012017-01-18 21:25:25 +0800863 (unsigned long)arch_timer_rate / 1000000,
864 (unsigned long)(arch_timer_rate / 10000) % 100,
Fu Wei8a5c21d2017-01-18 21:25:26 +0800865 type & ARCH_TIMER_TYPE_CP15 ?
Fu Weiee34f1e2017-01-18 21:25:27 +0800866 (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
Stephen Boyd22006992013-07-18 16:59:32 -0700867 "",
Fu Wei8a5c21d2017-01-18 21:25:26 +0800868 type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
869 type & ARCH_TIMER_TYPE_MEM ?
Stephen Boyd22006992013-07-18 16:59:32 -0700870 arch_timer_mem_use_virtual ? "virt" : "phys" :
871 "");
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000872}
873
/* Return the detected system counter frequency in Hz (0 if unknown). */
u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}
878
Stephen Boyd22006992013-07-18 16:59:32 -0700879static u64 arch_counter_get_cntvct_mem(void)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000880{
Stephen Boyd22006992013-07-18 16:59:32 -0700881 u32 vct_lo, vct_hi, tmp_hi;
882
883 do {
884 vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
885 vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
886 tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
887 } while (vct_hi != tmp_hi);
888
889 return ((u64) vct_hi << 32) | vct_lo;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000890}
891
/* Timer details (timecounter, virtual IRQ) exported to KVM. */
static struct arch_timer_kvm_info arch_timer_kvm_info;

/* Accessor for KVM; returns the singleton filled in during probe. */
struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000898
/*
 * Register the system counter as clocksource, sched_clock and KVM
 * timecounter. Selects the arch_timer_read_counter backend first:
 * CP15 virtual counter (always on arm64, or when using the virtual
 * PPI), CP15 physical counter otherwise, or the memory-mapped virtual
 * counter when no CP15 timer is present.
 */
static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_TIMER_TYPE_CP15) {
		if (IS_ENABLED(CONFIG_ARM64) ||
		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		/* vdso_default may already be false due to errata */
		clocksource_counter.archdata.vdso_direct = vdso_default;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	/* Keep the KVM cyclecounter scaled identically to the clocksource */
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}
928
Paul Gortmaker8c37bb32013-06-19 11:32:08 -0400929static void arch_timer_stop(struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000930{
Fu Weided24012017-01-18 21:25:25 +0800931 pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000932
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000933 disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
934 if (arch_timer_has_nonsecure_ppi())
Fu Weiee34f1e2017-01-18 21:25:27 +0800935 disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000936
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530937 clk->set_state_shutdown(clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000938}
939
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000940static int arch_timer_dying_cpu(unsigned int cpu)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000941{
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000942 struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000943
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000944 arch_timer_stop(clk);
945 return 0;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000946}
947
#ifdef CONFIG_CPU_PM
/* Per-CPU copy of CNTKCTL, preserved across low-power states */
static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
/*
 * CPU PM notifier: CNTKCTL can be lost across deep idle/suspend, so
 * save it on CPU_PM_ENTER and restore it on exit (or failed entry).
 */
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER)
		__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
		arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

/* Hook the notifier into the CPU PM framework. */
static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

/* Undo arch_timer_cpu_pm_init() on the probe error path. */
static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
/* !CONFIG_CPU_PM: nothing to save/restore; keep callers unconditional. */
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif
984
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000985static int __init arch_timer_register(void)
986{
987 int err;
988 int ppi;
989
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000990 arch_timer_evt = alloc_percpu(struct clock_event_device);
991 if (!arch_timer_evt) {
992 err = -ENOMEM;
993 goto out;
994 }
995
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000996 ppi = arch_timer_ppi[arch_timer_uses_ppi];
997 switch (arch_timer_uses_ppi) {
Fu Weiee34f1e2017-01-18 21:25:27 +0800998 case ARCH_TIMER_VIRT_PPI:
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000999 err = request_percpu_irq(ppi, arch_timer_handler_virt,
1000 "arch_timer", arch_timer_evt);
Marc Zyngierf81f03f2014-02-20 15:21:23 +00001001 break;
Fu Weiee34f1e2017-01-18 21:25:27 +08001002 case ARCH_TIMER_PHYS_SECURE_PPI:
1003 case ARCH_TIMER_PHYS_NONSECURE_PPI:
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001004 err = request_percpu_irq(ppi, arch_timer_handler_phys,
1005 "arch_timer", arch_timer_evt);
Fu Weiee34f1e2017-01-18 21:25:27 +08001006 if (!err && arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]) {
1007 ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001008 err = request_percpu_irq(ppi, arch_timer_handler_phys,
1009 "arch_timer", arch_timer_evt);
1010 if (err)
Fu Weiee34f1e2017-01-18 21:25:27 +08001011 free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001012 arch_timer_evt);
1013 }
Marc Zyngierf81f03f2014-02-20 15:21:23 +00001014 break;
Fu Weiee34f1e2017-01-18 21:25:27 +08001015 case ARCH_TIMER_HYP_PPI:
Marc Zyngierf81f03f2014-02-20 15:21:23 +00001016 err = request_percpu_irq(ppi, arch_timer_handler_phys,
1017 "arch_timer", arch_timer_evt);
1018 break;
1019 default:
1020 BUG();
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001021 }
1022
1023 if (err) {
Fu Weided24012017-01-18 21:25:25 +08001024 pr_err("can't register interrupt %d (%d)\n", ppi, err);
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001025 goto out_free;
1026 }
1027
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +01001028 err = arch_timer_cpu_pm_init();
1029 if (err)
1030 goto out_unreg_notify;
1031
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001032
Richard Cochran7e86e8b2016-07-13 17:16:39 +00001033 /* Register and immediately configure the timer on the boot CPU */
1034 err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
Thomas Gleixner73c1b412016-12-21 20:19:54 +01001035 "clockevents/arm/arch_timer:starting",
Richard Cochran7e86e8b2016-07-13 17:16:39 +00001036 arch_timer_starting_cpu, arch_timer_dying_cpu);
1037 if (err)
1038 goto out_unreg_cpupm;
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001039 return 0;
1040
Richard Cochran7e86e8b2016-07-13 17:16:39 +00001041out_unreg_cpupm:
1042 arch_timer_cpu_pm_deinit();
1043
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +01001044out_unreg_notify:
Marc Zyngierf81f03f2014-02-20 15:21:23 +00001045 free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
1046 if (arch_timer_has_nonsecure_ppi())
Fu Weiee34f1e2017-01-18 21:25:27 +08001047 free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001048 arch_timer_evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001049
1050out_free:
1051 free_percpu(arch_timer_evt);
1052out:
1053 return err;
1054}
1055
Stephen Boyd22006992013-07-18 16:59:32 -07001056static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
1057{
1058 int ret;
1059 irq_handler_t func;
1060 struct arch_timer *t;
1061
1062 t = kzalloc(sizeof(*t), GFP_KERNEL);
1063 if (!t)
1064 return -ENOMEM;
1065
1066 t->base = base;
1067 t->evt.irq = irq;
Fu Wei8a5c21d2017-01-18 21:25:26 +08001068 __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);
Stephen Boyd22006992013-07-18 16:59:32 -07001069
1070 if (arch_timer_mem_use_virtual)
1071 func = arch_timer_handler_virt_mem;
1072 else
1073 func = arch_timer_handler_phys_mem;
1074
1075 ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
1076 if (ret) {
Fu Weided24012017-01-18 21:25:25 +08001077 pr_err("Failed to request mem timer irq\n");
Stephen Boyd22006992013-07-18 16:59:32 -07001078 kfree(t);
1079 }
1080
1081 return ret;
1082}
1083
/* DT compatibles handled by the CP15 (per-CPU) timer probe path. */
static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer", },
	{ .compatible = "arm,armv8-timer", },
	{},
};

/* DT compatibles handled by the memory-mapped timer probe path. */
static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer-mem", },
	{},
};
1094
Sudeep Hollac387f072014-09-29 01:50:05 +02001095static bool __init
Laurent Pinchart566e6df2015-03-31 12:12:22 +02001096arch_timer_needs_probing(int type, const struct of_device_id *matches)
Sudeep Hollac387f072014-09-29 01:50:05 +02001097{
1098 struct device_node *dn;
Laurent Pinchart566e6df2015-03-31 12:12:22 +02001099 bool needs_probing = false;
Sudeep Hollac387f072014-09-29 01:50:05 +02001100
1101 dn = of_find_matching_node(NULL, matches);
Marc Zyngier59aa8962014-10-15 16:06:20 +01001102 if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
Laurent Pinchart566e6df2015-03-31 12:12:22 +02001103 needs_probing = true;
Sudeep Hollac387f072014-09-29 01:50:05 +02001104 of_node_put(dn);
1105
Laurent Pinchart566e6df2015-03-31 12:12:22 +02001106 return needs_probing;
Sudeep Hollac387f072014-09-29 01:50:05 +02001107}
1108
/*
 * Final registration step shared by the CP15, MEM and ACPI probe
 * paths. Defers (returns 0) while the other timer flavour still has an
 * unprobed DT node; otherwise prints the banner, registers the counter
 * and performs the arch-specific init.
 */
static int __init arch_timer_common_init(void)
{
	unsigned mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;

	/* Wait until both nodes are probed if we have two timers */
	if ((arch_timers_present & mask) != mask) {
		if (arch_timer_needs_probing(ARCH_TIMER_TYPE_MEM,
					     arch_timer_mem_of_match))
			return 0;
		if (arch_timer_needs_probing(ARCH_TIMER_TYPE_CP15,
					     arch_timer_of_match))
			return 0;
	}

	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}
1127
/*
 * Common CP15 timer initialisation (DT and ACPI): select which PPI to
 * drive, register the per-CPU timers, run the common registration step
 * and publish the virtual IRQ to KVM. Returns 0 or a negative errno.
 */
static int __init arch_timer_init(void)
{
	int ret;
	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 *
	 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
	 * accesses to CNTP_*_EL1 registers are silently redirected to
	 * their CNTHP_*_EL2 counterparts, and use a different PPI
	 * number.
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[ARCH_TIMER_VIRT_PPI]) {
		bool has_ppi;

		if (is_kernel_in_hyp_mode()) {
			arch_timer_uses_ppi = ARCH_TIMER_HYP_PPI;
			has_ppi = !!arch_timer_ppi[ARCH_TIMER_HYP_PPI];
		} else {
			arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
			has_ppi = (!!arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI] ||
				   !!arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
		}

		if (!has_ppi) {
			pr_warn("No interrupt available, giving up\n");
			return -EINVAL;
		}
	}

	ret = arch_timer_register();
	if (ret)
		return ret;

	ret = arch_timer_common_init();
	if (ret)
		return ret;

	/* Let KVM know which interrupt backs the guests' virtual timer */
	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];

	return 0;
}
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001174
/*
 * DT probe for the CP15 timer node ("arm,armv7-timer" /
 * "arm,armv8-timer"): map all PPIs, detect the counter rate, record
 * always-on/suspend properties, apply DT-matched errata workarounds,
 * then hand over to arch_timer_init().
 */
static int __init arch_timer_of_init(struct device_node *np)
{
	int i;

	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;
	/* Map every architected-timer PPI listed in the node */
	for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_detect_rate(NULL, np);

	/* "always-on" means the timer keeps running in deep idle (no C3STOP) */
	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_dt, np);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							 "arm,no-tick-in-suspend");

	return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
Stephen Boyd22006992013-07-18 16:59:32 -07001211
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001212static int __init arch_timer_mem_init(struct device_node *np)
Stephen Boyd22006992013-07-18 16:59:32 -07001213{
1214 struct device_node *frame, *best_frame = NULL;
1215 void __iomem *cntctlbase, *base;
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001216 unsigned int irq, ret = -EINVAL;
Stephen Boyd22006992013-07-18 16:59:32 -07001217 u32 cnttidr;
1218
Fu Wei8a5c21d2017-01-18 21:25:26 +08001219 arch_timers_present |= ARCH_TIMER_TYPE_MEM;
Stephen Boyd22006992013-07-18 16:59:32 -07001220 cntctlbase = of_iomap(np, 0);
1221 if (!cntctlbase) {
Fu Weided24012017-01-18 21:25:25 +08001222 pr_err("Can't find CNTCTLBase\n");
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001223 return -ENXIO;
Stephen Boyd22006992013-07-18 16:59:32 -07001224 }
1225
1226 cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
Stephen Boyd22006992013-07-18 16:59:32 -07001227
1228 /*
1229 * Try to find a virtual capable frame. Otherwise fall back to a
1230 * physical capable frame.
1231 */
1232 for_each_available_child_of_node(np, frame) {
1233 int n;
Robin Murphye392d602016-02-01 12:00:48 +00001234 u32 cntacr;
Stephen Boyd22006992013-07-18 16:59:32 -07001235
1236 if (of_property_read_u32(frame, "frame-number", &n)) {
Fu Weided24012017-01-18 21:25:25 +08001237 pr_err("Missing frame-number\n");
Stephen Boyd22006992013-07-18 16:59:32 -07001238 of_node_put(frame);
Robin Murphye392d602016-02-01 12:00:48 +00001239 goto out;
Stephen Boyd22006992013-07-18 16:59:32 -07001240 }
1241
Robin Murphye392d602016-02-01 12:00:48 +00001242 /* Try enabling everything, and see what sticks */
1243 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
1244 CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
1245 writel_relaxed(cntacr, cntctlbase + CNTACR(n));
1246 cntacr = readl_relaxed(cntctlbase + CNTACR(n));
1247
1248 if ((cnttidr & CNTTIDR_VIRT(n)) &&
1249 !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
Stephen Boyd22006992013-07-18 16:59:32 -07001250 of_node_put(best_frame);
1251 best_frame = frame;
1252 arch_timer_mem_use_virtual = true;
1253 break;
1254 }
Robin Murphye392d602016-02-01 12:00:48 +00001255
1256 if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
1257 continue;
1258
Stephen Boyd22006992013-07-18 16:59:32 -07001259 of_node_put(best_frame);
1260 best_frame = of_node_get(frame);
1261 }
1262
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001263 ret= -ENXIO;
Stephen Boydf947ee12016-10-26 00:35:50 -07001264 base = arch_counter_base = of_io_request_and_map(best_frame, 0,
1265 "arch_mem_timer");
1266 if (IS_ERR(base)) {
Fu Weided24012017-01-18 21:25:25 +08001267 pr_err("Can't map frame's registers\n");
Robin Murphye392d602016-02-01 12:00:48 +00001268 goto out;
Stephen Boyd22006992013-07-18 16:59:32 -07001269 }
1270
1271 if (arch_timer_mem_use_virtual)
1272 irq = irq_of_parse_and_map(best_frame, 1);
1273 else
1274 irq = irq_of_parse_and_map(best_frame, 0);
Robin Murphye392d602016-02-01 12:00:48 +00001275
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001276 ret = -EINVAL;
Stephen Boyd22006992013-07-18 16:59:32 -07001277 if (!irq) {
Fu Weided24012017-01-18 21:25:25 +08001278 pr_err("Frame missing %s irq.\n",
Thomas Gleixnercfb6d652013-08-21 14:59:23 +02001279 arch_timer_mem_use_virtual ? "virt" : "phys");
Robin Murphye392d602016-02-01 12:00:48 +00001280 goto out;
Stephen Boyd22006992013-07-18 16:59:32 -07001281 }
1282
1283 arch_timer_detect_rate(base, np);
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001284 ret = arch_timer_mem_register(base, irq);
1285 if (ret)
1286 goto out;
1287
1288 return arch_timer_common_init();
Robin Murphye392d602016-02-01 12:00:48 +00001289out:
1290 iounmap(cntctlbase);
1291 of_node_put(best_frame);
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001292 return ret;
Stephen Boyd22006992013-07-18 16:59:32 -07001293}
Daniel Lezcano177cf6e2016-06-07 00:27:44 +02001294CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
Stephen Boyd22006992013-07-18 16:59:32 -07001295 arch_timer_mem_init);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001296
#ifdef CONFIG_ACPI
/*
 * Translate a GTDT interrupt descriptor into a Linux IRQ number via
 * acpi_register_gsi(). A GSI of 0 means "not provided" and maps to 0.
 */
static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
{
	int trigger, polarity;

	if (!interrupt)
		return 0;

	trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
			: ACPI_LEVEL_SENSITIVE;

	polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
			: ACPI_ACTIVE_HIGH;

	return acpi_register_gsi(NULL, interrupt, trigger, polarity);
}

/* Initialize per-processor generic timer from the ACPI GTDT table. */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	struct acpi_table_gtdt *gtdt;

	/* The DT path may have run first; don't probe twice */
	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("already initialized, skipping\n");
		return -EINVAL;
	}

	gtdt = container_of(table, struct acpi_table_gtdt, header);

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;

	/* Map the four GTDT-described PPIs into arch_timer_ppi[] */
	arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI] =
		map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
					    gtdt->secure_el1_flags);

	arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
					    gtdt->non_secure_el1_flags);

	arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
		map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
					    gtdt->virtual_timer_flags);

	arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
					    gtdt->non_secure_el2_flags);

	/* Get the frequency from CNTFRQ */
	arch_timer_detect_rate(NULL, NULL);

	/* Always-on capability */
	arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);

	arch_timer_init();
	return 0;
}
CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif