blob: 5c5c2af74ad997c6b33c76dc99c255c70c8e67d0 [file] [log] [blame]
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001/*
2 * linux/drivers/clocksource/arm_arch_timer.c
3 *
4 * Copyright (C) 2011 ARM Ltd.
5 * All Rights Reserved
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
Marc Zyngierf005bd72016-08-01 10:54:15 +010011
12#define pr_fmt(fmt) "arm_arch_timer: " fmt
13
Mark Rutland8a4da6e2012-11-12 14:33:44 +000014#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/device.h>
17#include <linux/smp.h>
18#include <linux/cpu.h>
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +010019#include <linux/cpu_pm.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000020#include <linux/clockchips.h>
Richard Cochran7c8f1e72015-01-06 14:26:13 +010021#include <linux/clocksource.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000022#include <linux/interrupt.h>
23#include <linux/of_irq.h>
Stephen Boyd22006992013-07-18 16:59:32 -070024#include <linux/of_address.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000025#include <linux/io.h>
Stephen Boyd22006992013-07-18 16:59:32 -070026#include <linux/slab.h>
Ingo Molnare6017572017-02-01 16:36:40 +010027#include <linux/sched/clock.h>
Stephen Boyd65cd4f62013-07-18 16:21:18 -070028#include <linux/sched_clock.h>
Hanjun Guob09ca1e2015-03-24 14:02:50 +000029#include <linux/acpi.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000030
31#include <asm/arch_timer.h>
Marc Zyngier82668912013-01-10 11:13:07 +000032#include <asm/virt.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000033
34#include <clocksource/arm_arch_timer.h>
35
Stephen Boyd22006992013-07-18 16:59:32 -070036#define CNTTIDR 0x08
37#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
38
Robin Murphye392d602016-02-01 12:00:48 +000039#define CNTACR(n) (0x40 + ((n) * 4))
40#define CNTACR_RPCT BIT(0)
41#define CNTACR_RVCT BIT(1)
42#define CNTACR_RFRQ BIT(2)
43#define CNTACR_RVOFF BIT(3)
44#define CNTACR_RWVT BIT(4)
45#define CNTACR_RWPT BIT(5)
46
Stephen Boyd22006992013-07-18 16:59:32 -070047#define CNTVCT_LO 0x08
48#define CNTVCT_HI 0x0c
49#define CNTFRQ 0x10
50#define CNTP_TVAL 0x28
51#define CNTP_CTL 0x2c
52#define CNTV_TVAL 0x38
53#define CNTV_CTL 0x3c
54
55#define ARCH_CP15_TIMER BIT(0)
56#define ARCH_MEM_TIMER BIT(1)
57static unsigned arch_timers_present __initdata;
58
59static void __iomem *arch_counter_base;
60
61struct arch_timer {
62 void __iomem *base;
63 struct clock_event_device evt;
64};
65
66#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
67
Mark Rutland8a4da6e2012-11-12 14:33:44 +000068static u32 arch_timer_rate;
69
70enum ppi_nr {
71 PHYS_SECURE_PPI,
72 PHYS_NONSECURE_PPI,
73 VIRT_PPI,
74 HYP_PPI,
75 MAX_TIMER_PPI
76};
77
78static int arch_timer_ppi[MAX_TIMER_PPI];
79
80static struct clock_event_device __percpu *arch_timer_evt;
81
Marc Zyngierf81f03f2014-02-20 15:21:23 +000082static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
Lorenzo Pieralisi82a561942014-04-08 10:04:32 +010083static bool arch_timer_c3stop;
Stephen Boyd22006992013-07-18 16:59:32 -070084static bool arch_timer_mem_use_virtual;
Brian Norrisd8ec7592016-10-04 11:12:09 -070085static bool arch_counter_suspend_stop;
Mark Rutland8a4da6e2012-11-12 14:33:44 +000086
Will Deacon46fd5c62016-06-27 17:30:13 +010087static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
88
/*
 * Boot-time override for event-stream generation:
 * "clocksource.arm_arch_timer.evtstrm=<bool>" replaces the Kconfig default
 * held in evtstrm_enable.
 */
static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
93early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
94
Mark Rutland8a4da6e2012-11-12 14:33:44 +000095/*
96 * Architected system timer support.
97 */
98
Scott Woodf6dc1572016-09-22 03:35:17 -050099#ifdef CONFIG_FSL_ERRATUM_A008585
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000100/*
101 * The number of retries is an arbitrary value well beyond the highest number
102 * of iterations the loop has been observed to take.
103 */
104#define __fsl_a008585_read_reg(reg) ({ \
105 u64 _old, _new; \
106 int _retries = 200; \
107 \
108 do { \
109 _old = read_sysreg(reg); \
110 _new = read_sysreg(reg); \
111 _retries--; \
112 } while (unlikely(_old != _new) && _retries); \
113 \
114 WARN_ON_ONCE(!_retries); \
115 _new; \
116})
Scott Woodf6dc1572016-09-22 03:35:17 -0500117
/* Out-of-line CNTP_TVAL_EL0 read, retried per erratum A-008585. */
static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

/* Out-of-line CNTV_TVAL_EL0 read, retried per erratum A-008585. */
static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

/* Out-of-line CNTVCT_EL0 read, retried per erratum A-008585. */
static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000132#endif
133
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000134#ifdef CONFIG_HISILICON_ERRATUM_161010101
135/*
136 * Verify whether the value of the second read is larger than the first by
137 * less than 32 is the only way to confirm the value is correct, so clear the
138 * lower 5 bits to check whether the difference is greater than 32 or not.
139 * Theoretically the erratum should not occur more than twice in succession
140 * when reading the system counter, but it is possible that some interrupts
141 * may lead to more than twice read errors, triggering the warning, so setting
142 * the number of retries far beyond the number of iterations the loop has been
143 * observed to take.
144 */
145#define __hisi_161010101_read_reg(reg) ({ \
146 u64 _old, _new; \
147 int _retries = 50; \
148 \
149 do { \
150 _old = read_sysreg(reg); \
151 _new = read_sysreg(reg); \
152 _retries--; \
153 } while (unlikely((_new - _old) >> 5) && _retries); \
154 \
155 WARN_ON_ONCE(!_retries); \
156 _new; \
157})
158
/* CNTP_TVAL_EL0 read hardened against HiSilicon erratum 161010101. */
static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

/* CNTV_TVAL_EL0 read hardened against HiSilicon erratum 161010101. */
static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

/* CNTVCT_EL0 read hardened against HiSilicon erratum 161010101. */
static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}
173#endif
174
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000175#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
176const struct arch_timer_erratum_workaround *timer_unstable_counter_workaround = NULL;
177EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
178
179DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
180EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
181
182static const struct arch_timer_erratum_workaround ool_workarounds[] = {
183#ifdef CONFIG_FSL_ERRATUM_A008585
184 {
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000185 .match_type = ate_match_dt,
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000186 .id = "fsl,erratum-a008585",
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000187 .desc = "Freescale erratum a005858",
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000188 .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
189 .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
190 .read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
191 },
192#endif
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000193#ifdef CONFIG_HISILICON_ERRATUM_161010101
194 {
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000195 .match_type = ate_match_dt,
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000196 .id = "hisilicon,erratum-161010101",
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000197 .desc = "HiSilicon erratum 161010101",
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000198 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
199 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
200 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
201 },
202#endif
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000203};
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000204
205typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
206 const void *);
207
208static
209bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
210 const void *arg)
211{
212 const struct device_node *np = arg;
213
214 return of_property_read_bool(np, wa->id);
215}
216
217static const struct arch_timer_erratum_workaround *
218arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
219 ate_match_fn_t match_fn,
220 void *arg)
221{
222 int i;
223
224 for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
225 if (ool_workarounds[i].match_type != type)
226 continue;
227
228 if (match_fn(&ool_workarounds[i], arg))
229 return &ool_workarounds[i];
230 }
231
232 return NULL;
233}
234
/*
 * Latch the selected workaround and flip the static key so the
 * out-of-line accessors are used from now on.
 */
static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa)
{
	timer_unstable_counter_workaround = wa;
	static_branch_enable(&arch_timer_read_ool_enabled);
}
241
/*
 * Probe-time hook: look for a counter erratum of the given match type
 * (arg is type-specific, e.g. the DT node for ate_match_dt) and enable
 * its workaround. At most one global workaround is ever enabled; later
 * calls are no-ops once the static key is set.
 */
static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
					    void *arg)
{
	const struct arch_timer_erratum_workaround *wa;
	ate_match_fn_t match_fn = NULL;

	/* A workaround is already active - nothing more to do. */
	if (static_branch_unlikely(&arch_timer_read_ool_enabled))
		return;

	switch (type) {
	case ate_match_dt:
		match_fn = arch_timer_check_dt_erratum;
		break;
	default:
		/* Unknown match type indicates a caller bug. */
		WARN_ON(1);
		return;
	}

	wa = arch_timer_iterate_errata(type, match_fn, arg);
	if (!wa)
		return;

	arch_timer_enable_workaround(wa);
	pr_info("Enabling global workaround for %s\n", wa->desc);
}
267
268#else
269#define arch_timer_check_ool_workaround(t,a) do { } while(0)
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000270#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
Scott Woodf6dc1572016-09-22 03:35:17 -0500271
/*
 * Write CTRL or TVAL for whichever timer "access" selects: the MMIO
 * physical/virtual frames (relaxed writes relative to clk's frame base),
 * or the CP15/sysreg per-cpu timer otherwise.
 */
static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}
300
/*
 * Read CTRL or TVAL from the timer selected by "access"; mirror image of
 * arch_timer_reg_write() (MMIO frames vs. CP15/sysreg).
 */
static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}
333
/*
 * Common interrupt body for all timer flavours: if the interrupt status
 * bit is set, mask the timer (it fires again only after the next
 * set_next_event reprograms CTRL) and dispatch to the clockevent handler;
 * otherwise report a spurious interrupt.
 */
static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
349
/* irq handlers: bind timer_handler() to each of the four access types. */

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}
377
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530378static __always_inline int timer_shutdown(const int access,
379 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000380{
381 unsigned long ctrl;
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530382
383 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
384 ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
385 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
386
387 return 0;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000388}
389
/* set_state_shutdown callbacks: one per access type. */

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}
409
/*
 * Program a one-shot expiry "evt" ticks from now: load TVAL first, then
 * write CTRL with the timer enabled and its interrupt unmasked.
 */
static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
420
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000421#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
/*
 * Erratum-safe variant of set_next_event(): instead of writing the
 * (potentially broken) TVAL register, compute an absolute compare value
 * from the current counter and write CVAL directly via sysreg, then
 * enable/unmask through CTRL.
 */
static __always_inline void erratum_set_next_event_generic(const int access,
		unsigned long evt, struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval = evt + arch_counter_get_cntvct();

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		write_sysreg(cval, cntp_cval_el0);
	else if (access == ARCH_TIMER_VIRT_ACCESS)
		write_sysreg(cval, cntv_cval_el0);

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
439
/* Erratum-safe set_next_event callbacks (virtual / physical timer). */

static int erratum_set_next_event_virt(unsigned long evt,
				       struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int erratum_set_next_event_phys(unsigned long evt,
				       struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000453#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
Scott Woodf6dc1572016-09-22 03:35:17 -0500454
/* Default set_next_event callbacks: one per access type. */

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}
482
/*
 * If an out-of-line counter workaround is active, swap the clockevent's
 * set_next_event callback for the CVAL-based erratum-safe variant.
 * No-op when CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND is disabled.
 */
static void erratum_workaround_set_sne(struct clock_event_device *clk)
{
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
	if (!static_branch_unlikely(&arch_timer_read_ool_enabled))
		return;

	if (arch_timer_uses_ppi == VIRT_PPI)
		clk->set_next_event = erratum_set_next_event_virt;
	else
		clk->set_next_event = erratum_set_next_event_phys;
#endif
}
495
/*
 * Populate and register a one-shot clockevent for either the per-cpu
 * CP15 timer (type == ARCH_CP15_TIMER) or an MMIO timer frame, wiring
 * the shutdown/set_next_event callbacks that match the chosen access
 * path, then register it with the clockevents core.
 */
static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_CP15_TIMER) {
		/* C3STOP: the timer may stop in deep idle states. */
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case PHYS_SECURE_PPI:
		case PHYS_NONSECURE_PPI:
		case HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
			break;
		default:
			BUG();
		}

		/* May override set_next_event if a counter erratum is live. */
		erratum_workaround_set_sne(clk);
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_all_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	/* Start from a known-disabled state before registration. */
	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}
548
/*
 * Enable the virtual-counter event stream in CNTKCTL with the given
 * trigger-bit divider, and advertise the capability via ELF hwcaps
 * (userspace can then use WFE-based delays).
 */
static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}
563
/*
 * Pick the counter-bit divider that gets the event-stream frequency
 * closest to ARCH_TIMER_EVT_STREAM_FREQ, rounding to the nearer power
 * of two and clamping to the 4-bit field maximum (15).
 * NOTE(review): the exact rounding interaction with the CNTKCTL.EVNTI
 * encoding is subtle - confirm against the ARM ARM if changing.
 */
static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, pos;

	/* Find the closest power of two to the divisor */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;
	/* enable event stream */
	arch_timer_evtstrm_enable(min(pos, 15));
}
576
/*
 * Configure CNTKCTL for userspace: expose only the virtual counter;
 * timers, the physical counter and the event stream stay kernel-only
 * (the event stream is re-enabled separately if requested).
 */
static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and the physical counter */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/* Enable user access to the virtual counter */
	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}
593
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000594static bool arch_timer_has_nonsecure_ppi(void)
595{
596 return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
597 arch_timer_ppi[PHYS_NONSECURE_PPI]);
598}
599
Marc Zyngierf005bd72016-08-01 10:54:15 +0100600static u32 check_ppi_trigger(int irq)
601{
602 u32 flags = irq_get_trigger_type(irq);
603
604 if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
605 pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
606 pr_warn("WARNING: Please fix your firmware\n");
607 flags = IRQF_TRIGGER_LOW;
608 }
609
610 return flags;
611}
612
/*
 * cpuhp "starting" callback: set up this CPU's CP15 clockevent, enable
 * its percpu PPI(s) with a validated trigger, restrict userspace counter
 * access and (optionally) turn on the event stream. Always returns 0.
 */
static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_CP15_TIMER, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	/* Secure-PPI systems may have a paired non-secure PPI to enable. */
	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}
634
/*
 * Determine arch_timer_rate once: DT "clock-frequency" wins when booting
 * via DT; otherwise read CNTFRQ, either from the MMIO frame (cntbase)
 * or the CP15 register. Warns (but proceeds) if no rate is found.
 */
static void
arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	/*
	 * Try to determine the frequency from the device tree or CNTFRQ,
	 * if ACPI is enabled, get the frequency from CNTFRQ ONLY.
	 */
	if (!acpi_disabled ||
	    of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
		if (cntbase)
			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
		else
			arch_timer_rate = arch_timer_get_cntfrq();
	}

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("Architected timer frequency not available\n");
}
658
/*
 * Log a one-line summary of the registered timer(s): which flavours
 * (cp15 and/or mmio), the counter rate in MHz, and the access mode
 * (virt/phys) used by each.
 */
static void arch_timer_banner(unsigned type)
{
	pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_CP15_TIMER ? "cp15" : "",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "",
		type & ARCH_MEM_TIMER ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_CP15_TIMER ?
		(arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
		"",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
		type & ARCH_MEM_TIMER ?
		arch_timer_mem_use_virtual ? "virt" : "phys" :
		"");
}
675
/* Public accessor for the detected counter frequency (Hz). */
u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}
680
/*
 * Read the 64-bit virtual counter through the 32-bit MMIO frame.
 * The hi/lo/hi read sequence guards against a torn value when the low
 * word wraps between reads; retry until the high word is stable.
 */
static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}
693
Stephen Boyd22006992013-07-18 16:59:32 -0700694/*
695 * Default to cp15 based access because arm64 uses this function for
696 * sched_clock() before DT is probed and the cp15 method is guaranteed
697 * to exist on arm64. arm doesn't use this before DT is probed so even
698 * if we don't have the cp15 accessors we won't have a problem.
699 */
700u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
701
/* clocksource read callback: defer to the selected counter accessor. */
static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

/* cyclecounter read callback (KVM timecounter): same backing counter. */
static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}
711
712static struct clocksource clocksource_counter = {
713 .name = "arch_sys_counter",
714 .rating = 400,
715 .read = arch_counter_read,
716 .mask = CLOCKSOURCE_MASK(56),
Brian Norrisd8ec7592016-10-04 11:12:09 -0700717 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000718};
719
Bhumika Goyal3d837bc2017-02-12 00:50:18 +0530720static struct cyclecounter cyclecounter __ro_after_init = {
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000721 .read = arch_counter_read_cc,
722 .mask = CLOCKSOURCE_MASK(56),
723};
724
Julien Grallb4d6ce92016-04-11 16:32:51 +0100725static struct arch_timer_kvm_info arch_timer_kvm_info;
726
/* Expose the timer/timecounter info KVM needs for guest timekeeping. */
struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000731
/*
 * Select the system-wide counter accessor (CP15 virt/phys or MMIO),
 * then register it as clocksource, KVM timecounter and sched_clock.
 */
static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_CP15_TIMER) {
		/* arm64 always reads the virtual counter; 32-bit only with VIRT_PPI. */
		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		clocksource_counter.archdata.vdso_direct = true;

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
		/*
		 * Don't use the vdso fastpath if errata require using
		 * the out-of-line counter accessor.
		 */
		if (static_branch_unlikely(&arch_timer_read_ool_enabled))
			clocksource_counter.archdata.vdso_direct = false;
#endif
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	/* Unless firmware says otherwise, the counter keeps running in suspend. */
	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}
769
/*
 * arch_timer_stop - quiesce the current CPU's timer.
 *
 * Disables the percpu PPI(s) this configuration uses and puts the
 * clockevent into shutdown state.  Runs on the outgoing CPU from the
 * hotplug "dying" callback.
 */
static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}
781
/* CPU hotplug "dying" callback: tear down the outgoing CPU's timer. */
static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	arch_timer_stop(clk);
	return 0;
}
789
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100790#ifdef CONFIG_CPU_PM
791static unsigned int saved_cntkctl;
792static int arch_timer_cpu_pm_notify(struct notifier_block *self,
793 unsigned long action, void *hcpu)
794{
795 if (action == CPU_PM_ENTER)
796 saved_cntkctl = arch_timer_get_cntkctl();
797 else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
798 arch_timer_set_cntkctl(saved_cntkctl);
799 return NOTIFY_OK;
800}
801
802static struct notifier_block arch_timer_cpu_pm_notifier = {
803 .notifier_call = arch_timer_cpu_pm_notify,
804};
805
806static int __init arch_timer_cpu_pm_init(void)
807{
808 return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
809}
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000810
811static void __init arch_timer_cpu_pm_deinit(void)
812{
813 WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
814}
815
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100816#else
817static int __init arch_timer_cpu_pm_init(void)
818{
819 return 0;
820}
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000821
822static void __init arch_timer_cpu_pm_deinit(void)
823{
824}
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100825#endif
826
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000827static int __init arch_timer_register(void)
828{
829 int err;
830 int ppi;
831
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000832 arch_timer_evt = alloc_percpu(struct clock_event_device);
833 if (!arch_timer_evt) {
834 err = -ENOMEM;
835 goto out;
836 }
837
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000838 ppi = arch_timer_ppi[arch_timer_uses_ppi];
839 switch (arch_timer_uses_ppi) {
840 case VIRT_PPI:
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000841 err = request_percpu_irq(ppi, arch_timer_handler_virt,
842 "arch_timer", arch_timer_evt);
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000843 break;
844 case PHYS_SECURE_PPI:
845 case PHYS_NONSECURE_PPI:
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000846 err = request_percpu_irq(ppi, arch_timer_handler_phys,
847 "arch_timer", arch_timer_evt);
848 if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
849 ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
850 err = request_percpu_irq(ppi, arch_timer_handler_phys,
851 "arch_timer", arch_timer_evt);
852 if (err)
853 free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
854 arch_timer_evt);
855 }
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000856 break;
857 case HYP_PPI:
858 err = request_percpu_irq(ppi, arch_timer_handler_phys,
859 "arch_timer", arch_timer_evt);
860 break;
861 default:
862 BUG();
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000863 }
864
865 if (err) {
866 pr_err("arch_timer: can't register interrupt %d (%d)\n",
867 ppi, err);
868 goto out_free;
869 }
870
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100871 err = arch_timer_cpu_pm_init();
872 if (err)
873 goto out_unreg_notify;
874
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000875
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000876 /* Register and immediately configure the timer on the boot CPU */
877 err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
Thomas Gleixner73c1b412016-12-21 20:19:54 +0100878 "clockevents/arm/arch_timer:starting",
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000879 arch_timer_starting_cpu, arch_timer_dying_cpu);
880 if (err)
881 goto out_unreg_cpupm;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000882 return 0;
883
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000884out_unreg_cpupm:
885 arch_timer_cpu_pm_deinit();
886
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100887out_unreg_notify:
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000888 free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
889 if (arch_timer_has_nonsecure_ppi())
890 free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000891 arch_timer_evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000892
893out_free:
894 free_percpu(arch_timer_evt);
895out:
896 return err;
897}
898
Stephen Boyd22006992013-07-18 16:59:32 -0700899static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
900{
901 int ret;
902 irq_handler_t func;
903 struct arch_timer *t;
904
905 t = kzalloc(sizeof(*t), GFP_KERNEL);
906 if (!t)
907 return -ENOMEM;
908
909 t->base = base;
910 t->evt.irq = irq;
911 __arch_timer_setup(ARCH_MEM_TIMER, &t->evt);
912
913 if (arch_timer_mem_use_virtual)
914 func = arch_timer_handler_virt_mem;
915 else
916 func = arch_timer_handler_phys_mem;
917
918 ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
919 if (ret) {
920 pr_err("arch_timer: Failed to request mem timer irq\n");
921 kfree(t);
922 }
923
924 return ret;
925}
926
927static const struct of_device_id arch_timer_of_match[] __initconst = {
928 { .compatible = "arm,armv7-timer", },
929 { .compatible = "arm,armv8-timer", },
930 {},
931};
932
933static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
934 { .compatible = "arm,armv7-timer-mem", },
935 {},
936};
937
Sudeep Hollac387f072014-09-29 01:50:05 +0200938static bool __init
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200939arch_timer_needs_probing(int type, const struct of_device_id *matches)
Sudeep Hollac387f072014-09-29 01:50:05 +0200940{
941 struct device_node *dn;
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200942 bool needs_probing = false;
Sudeep Hollac387f072014-09-29 01:50:05 +0200943
944 dn = of_find_matching_node(NULL, matches);
Marc Zyngier59aa8962014-10-15 16:06:20 +0100945 if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200946 needs_probing = true;
Sudeep Hollac387f072014-09-29 01:50:05 +0200947 of_node_put(dn);
948
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200949 return needs_probing;
Sudeep Hollac387f072014-09-29 01:50:05 +0200950}
951
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200952static int __init arch_timer_common_init(void)
Stephen Boyd22006992013-07-18 16:59:32 -0700953{
954 unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;
955
956 /* Wait until both nodes are probed if we have two timers */
957 if ((arch_timers_present & mask) != mask) {
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200958 if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200959 return 0;
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200960 if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200961 return 0;
Stephen Boyd22006992013-07-18 16:59:32 -0700962 }
963
964 arch_timer_banner(arch_timers_present);
965 arch_counter_register(arch_timers_present);
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200966 return arch_timer_arch_init();
Stephen Boyd22006992013-07-18 16:59:32 -0700967}
968
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200969static int __init arch_timer_init(void)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000970{
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200971 int ret;
Doug Anderson65b57322014-10-08 00:33:47 -0700972 /*
Marc Zyngier82668912013-01-10 11:13:07 +0000973 * If HYP mode is available, we know that the physical timer
974 * has been configured to be accessible from PL1. Use it, so
975 * that a guest can use the virtual timer instead.
976 *
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000977 * If no interrupt provided for virtual timer, we'll have to
978 * stick to the physical timer. It'd better be accessible...
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000979 *
980 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
981 * accesses to CNTP_*_EL1 registers are silently redirected to
982 * their CNTHP_*_EL2 counterparts, and use a different PPI
983 * number.
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000984 */
Marc Zyngier82668912013-01-10 11:13:07 +0000985 if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000986 bool has_ppi;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000987
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000988 if (is_kernel_in_hyp_mode()) {
989 arch_timer_uses_ppi = HYP_PPI;
990 has_ppi = !!arch_timer_ppi[HYP_PPI];
991 } else {
992 arch_timer_uses_ppi = PHYS_SECURE_PPI;
993 has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
994 !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
995 }
996
997 if (!has_ppi) {
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000998 pr_warn("arch_timer: No interrupt available, giving up\n");
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200999 return -EINVAL;
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001000 }
1001 }
1002
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001003 ret = arch_timer_register();
1004 if (ret)
1005 return ret;
1006
1007 ret = arch_timer_common_init();
1008 if (ret)
1009 return ret;
Julien Gralld9b5e412016-04-11 16:32:52 +01001010
1011 arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001012
1013 return 0;
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001014}
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001015
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001016static int __init arch_timer_of_init(struct device_node *np)
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001017{
1018 int i;
1019
1020 if (arch_timers_present & ARCH_CP15_TIMER) {
1021 pr_warn("arch_timer: multiple nodes in dt, skipping\n");
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001022 return 0;
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001023 }
1024
1025 arch_timers_present |= ARCH_CP15_TIMER;
1026 for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
1027 arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
1028
1029 arch_timer_detect_rate(NULL, np);
1030
1031 arch_timer_c3stop = !of_property_read_bool(np, "always-on");
1032
Marc Zyngier651bb2e2017-01-19 17:20:59 +00001033 /* Check for globally applicable workarounds */
1034 arch_timer_check_ool_workaround(ate_match_dt, np);
Scott Woodf6dc1572016-09-22 03:35:17 -05001035
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001036 /*
1037 * If we cannot rely on firmware initializing the timer registers then
1038 * we should use the physical timers instead.
1039 */
1040 if (IS_ENABLED(CONFIG_ARM) &&
1041 of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
Marc Zyngierf81f03f2014-02-20 15:21:23 +00001042 arch_timer_uses_ppi = PHYS_SECURE_PPI;
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001043
Brian Norrisd8ec7592016-10-04 11:12:09 -07001044 /* On some systems, the counter stops ticking when in suspend. */
1045 arch_counter_suspend_stop = of_property_read_bool(np,
1046 "arm,no-tick-in-suspend");
1047
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001048 return arch_timer_init();
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001049}
Daniel Lezcano177cf6e2016-06-07 00:27:44 +02001050CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
1051CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
Stephen Boyd22006992013-07-18 16:59:32 -07001052
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001053static int __init arch_timer_mem_init(struct device_node *np)
Stephen Boyd22006992013-07-18 16:59:32 -07001054{
1055 struct device_node *frame, *best_frame = NULL;
1056 void __iomem *cntctlbase, *base;
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001057 unsigned int irq, ret = -EINVAL;
Stephen Boyd22006992013-07-18 16:59:32 -07001058 u32 cnttidr;
1059
1060 arch_timers_present |= ARCH_MEM_TIMER;
1061 cntctlbase = of_iomap(np, 0);
1062 if (!cntctlbase) {
1063 pr_err("arch_timer: Can't find CNTCTLBase\n");
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001064 return -ENXIO;
Stephen Boyd22006992013-07-18 16:59:32 -07001065 }
1066
1067 cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
Stephen Boyd22006992013-07-18 16:59:32 -07001068
1069 /*
1070 * Try to find a virtual capable frame. Otherwise fall back to a
1071 * physical capable frame.
1072 */
1073 for_each_available_child_of_node(np, frame) {
1074 int n;
Robin Murphye392d602016-02-01 12:00:48 +00001075 u32 cntacr;
Stephen Boyd22006992013-07-18 16:59:32 -07001076
1077 if (of_property_read_u32(frame, "frame-number", &n)) {
1078 pr_err("arch_timer: Missing frame-number\n");
Stephen Boyd22006992013-07-18 16:59:32 -07001079 of_node_put(frame);
Robin Murphye392d602016-02-01 12:00:48 +00001080 goto out;
Stephen Boyd22006992013-07-18 16:59:32 -07001081 }
1082
Robin Murphye392d602016-02-01 12:00:48 +00001083 /* Try enabling everything, and see what sticks */
1084 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
1085 CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
1086 writel_relaxed(cntacr, cntctlbase + CNTACR(n));
1087 cntacr = readl_relaxed(cntctlbase + CNTACR(n));
1088
1089 if ((cnttidr & CNTTIDR_VIRT(n)) &&
1090 !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
Stephen Boyd22006992013-07-18 16:59:32 -07001091 of_node_put(best_frame);
1092 best_frame = frame;
1093 arch_timer_mem_use_virtual = true;
1094 break;
1095 }
Robin Murphye392d602016-02-01 12:00:48 +00001096
1097 if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
1098 continue;
1099
Stephen Boyd22006992013-07-18 16:59:32 -07001100 of_node_put(best_frame);
1101 best_frame = of_node_get(frame);
1102 }
1103
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001104 ret= -ENXIO;
Stephen Boydf947ee12016-10-26 00:35:50 -07001105 base = arch_counter_base = of_io_request_and_map(best_frame, 0,
1106 "arch_mem_timer");
1107 if (IS_ERR(base)) {
Stephen Boyd22006992013-07-18 16:59:32 -07001108 pr_err("arch_timer: Can't map frame's registers\n");
Robin Murphye392d602016-02-01 12:00:48 +00001109 goto out;
Stephen Boyd22006992013-07-18 16:59:32 -07001110 }
1111
1112 if (arch_timer_mem_use_virtual)
1113 irq = irq_of_parse_and_map(best_frame, 1);
1114 else
1115 irq = irq_of_parse_and_map(best_frame, 0);
Robin Murphye392d602016-02-01 12:00:48 +00001116
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001117 ret = -EINVAL;
Stephen Boyd22006992013-07-18 16:59:32 -07001118 if (!irq) {
1119 pr_err("arch_timer: Frame missing %s irq",
Thomas Gleixnercfb6d652013-08-21 14:59:23 +02001120 arch_timer_mem_use_virtual ? "virt" : "phys");
Robin Murphye392d602016-02-01 12:00:48 +00001121 goto out;
Stephen Boyd22006992013-07-18 16:59:32 -07001122 }
1123
1124 arch_timer_detect_rate(base, np);
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001125 ret = arch_timer_mem_register(base, irq);
1126 if (ret)
1127 goto out;
1128
1129 return arch_timer_common_init();
Robin Murphye392d602016-02-01 12:00:48 +00001130out:
1131 iounmap(cntctlbase);
1132 of_node_put(best_frame);
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001133 return ret;
Stephen Boyd22006992013-07-18 16:59:32 -07001134}
Daniel Lezcano177cf6e2016-06-07 00:27:44 +02001135CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
Stephen Boyd22006992013-07-18 16:59:32 -07001136 arch_timer_mem_init);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001137
1138#ifdef CONFIG_ACPI
1139static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
1140{
1141 int trigger, polarity;
1142
1143 if (!interrupt)
1144 return 0;
1145
1146 trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
1147 : ACPI_LEVEL_SENSITIVE;
1148
1149 polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
1150 : ACPI_ACTIVE_HIGH;
1151
1152 return acpi_register_gsi(NULL, interrupt, trigger, polarity);
1153}
1154
1155/* Initialize per-processor generic timer */
1156static int __init arch_timer_acpi_init(struct acpi_table_header *table)
1157{
1158 struct acpi_table_gtdt *gtdt;
1159
1160 if (arch_timers_present & ARCH_CP15_TIMER) {
1161 pr_warn("arch_timer: already initialized, skipping\n");
1162 return -EINVAL;
1163 }
1164
1165 gtdt = container_of(table, struct acpi_table_gtdt, header);
1166
1167 arch_timers_present |= ARCH_CP15_TIMER;
1168
1169 arch_timer_ppi[PHYS_SECURE_PPI] =
1170 map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
1171 gtdt->secure_el1_flags);
1172
1173 arch_timer_ppi[PHYS_NONSECURE_PPI] =
1174 map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
1175 gtdt->non_secure_el1_flags);
1176
1177 arch_timer_ppi[VIRT_PPI] =
1178 map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
1179 gtdt->virtual_timer_flags);
1180
1181 arch_timer_ppi[HYP_PPI] =
1182 map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
1183 gtdt->non_secure_el2_flags);
1184
1185 /* Get the frequency from CNTFRQ */
1186 arch_timer_detect_rate(NULL, NULL);
1187
1188 /* Always-on capability */
1189 arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);
1190
1191 arch_timer_init();
1192 return 0;
1193}
Marc Zyngierae281cb2015-09-28 15:49:17 +01001194CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001195#endif