/*
 * linux/drivers/clocksource/arm_arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	"arm_arch_timer: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c

#define ARCH_CP15_TIMER	BIT(0)
#define ARCH_MEM_TIMER	BIT(1)
static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base;

struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;

enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;

static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);

/*
 * Architected system timer support.
 */

#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
const struct arch_timer_erratum_workaround *timer_unstable_counter_workaround = NULL;
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);

static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
	{
		.id = "fsl,erratum-a008585",
		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
	},
#endif
};
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}

static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}

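/*
 * Common interrupt handler for every timer flavour: if the timer has
 * fired (ISTATUS set), mask its output and hand the event off to the
 * clockevent core; otherwise report the interrupt as not ours.
 */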
static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}

static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}

static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
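/*
 * Erratum workaround: compute the absolute comparator value from a
 * workaround-wrapped counter read and program CVAL directly, rather
 * than relying on a TVAL write.
 */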
static __always_inline void erratum_set_next_event_generic(const int access,
		unsigned long evt, struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval = evt + arch_counter_get_cntvct();

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		write_sysreg(cval, cntp_cval_el0);
	else if (access == ARCH_TIMER_VIRT_ACCESS)
		write_sysreg(cval, cntv_cval_el0);

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int erratum_set_next_event_virt(unsigned long evt,
					   struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int erratum_set_next_event_phys(unsigned long evt,
					   struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}

static void erratum_workaround_set_sne(struct clock_event_device *clk)
{
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
	if (!static_branch_unlikely(&arch_timer_read_ool_enabled))
		return;

	if (arch_timer_uses_ppi == VIRT_PPI)
		clk->set_next_event = erratum_set_next_event_virt;
	else
		clk->set_next_event = erratum_set_next_event_phys;
#endif
}

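/*
 * Fill in a clockevent for either the per-CPU system register timer
 * (ARCH_CP15_TIMER) or a memory-mapped frame (ARCH_MEM_TIMER), then
 * register it with the clockevents core.
 */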
static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_CP15_TIMER) {
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case PHYS_SECURE_PPI:
		case PHYS_NONSECURE_PPI:
		case HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
			break;
		default:
			BUG();
		}

		erratum_workaround_set_sne(clk);
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_all_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}

static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}

static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, pos;

	/* Find the closest power of two to the divisor */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;
	/* enable event stream */
	arch_timer_evtstrm_enable(min(pos, 15));
}

static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and the physical counter */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/* Enable user access to the virtual counter */
	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}

static bool arch_timer_has_nonsecure_ppi(void)
{
	return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
		arch_timer_ppi[PHYS_NONSECURE_PPI]);
}

static u32 check_ppi_trigger(int irq)
{
	u32 flags = irq_get_trigger_type(irq);

	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
		pr_warn("WARNING: Please fix your firmware\n");
		flags = IRQF_TRIGGER_LOW;
	}

	return flags;
}

static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_CP15_TIMER, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}

static void
arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	/*
	 * Try to determine the frequency from the device tree or CNTFRQ,
	 * if ACPI is enabled, get the frequency from CNTFRQ ONLY.
	 */
	if (!acpi_disabled ||
	    of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
		if (cntbase)
			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
		else
			arch_timer_rate = arch_timer_get_cntfrq();
	}

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("Architected timer frequency not available\n");
}

static void arch_timer_banner(unsigned type)
{
	pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_CP15_TIMER ? "cp15" : "",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "",
		type & ARCH_MEM_TIMER ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_CP15_TIMER ?
			(arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
			"",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
		type & ARCH_MEM_TIMER ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}

u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

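/*
 * The MMIO counter is read 32 bits at a time: read hi/lo/hi and retry
 * until the high word is stable, so a carry between the two halves
 * cannot produce a torn 64-bit value.
 */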
static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}

/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;

static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}

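/*
 * Pick the counter access routine (sysreg or MMIO), register it as a
 * clocksource, seed the KVM timecounter and wire it up as sched_clock.
 */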
static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_CP15_TIMER) {
		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		clocksource_counter.archdata.vdso_direct = true;

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
		/*
		 * Don't use the vdso fastpath if errata require using
		 * the out-of-line counter accessor.
		 */
		if (static_branch_unlikely(&arch_timer_read_ool_enabled))
			clocksource_counter.archdata.vdso_direct = false;
#endif
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}

static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}

static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	arch_timer_stop(clk);
	return 0;
}

#ifdef CONFIG_CPU_PM
static unsigned int saved_cntkctl;
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER)
		saved_cntkctl = arch_timer_get_cntkctl();
	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
		arch_timer_set_cntkctl(saved_cntkctl);
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif

static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case PHYS_SECURE_PPI:
	case PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_MEM_TIMER, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("arch_timer: Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer", },
	{ .compatible = "arm,armv8-timer", },
	{},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer-mem", },
	{},
};

static bool __init
arch_timer_needs_probing(int type, const struct of_device_id *matches)
{
	struct device_node *dn;
	bool needs_probing = false;

	dn = of_find_matching_node(NULL, matches);
	if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
		needs_probing = true;
	of_node_put(dn);

	return needs_probing;
}

static int __init arch_timer_common_init(void)
{
	unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;

	/* Wait until both nodes are probed if we have two timers */
	if ((arch_timers_present & mask) != mask) {
		if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
			return 0;
		if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
			return 0;
	}

	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}

static int __init arch_timer_init(void)
{
	int ret;
	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 *
	 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
	 * accesses to CNTP_*_EL1 registers are silently redirected to
	 * their CNTHP_*_EL2 counterparts, and use a different PPI
	 * number.
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		bool has_ppi;

		if (is_kernel_in_hyp_mode()) {
			arch_timer_uses_ppi = HYP_PPI;
			has_ppi = !!arch_timer_ppi[HYP_PPI];
		} else {
			arch_timer_uses_ppi = PHYS_SECURE_PPI;
			has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
				   !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
		}

		if (!has_ppi) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return -EINVAL;
		}
	}

	ret = arch_timer_register();
	if (ret)
		return ret;

	ret = arch_timer_common_init();
	if (ret)
		return ret;

	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];

	return 0;
}

static int __init arch_timer_of_init(struct device_node *np)
{
	int i;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_CP15_TIMER;
	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_detect_rate(NULL, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
		if (of_property_read_bool(np, ool_workarounds[i].id)) {
			timer_unstable_counter_workaround = &ool_workarounds[i];
			static_branch_enable(&arch_timer_read_ool_enabled);
			pr_info("arch_timer: Enabling workaround for %s\n",
				timer_unstable_counter_workaround->id);
			break;
		}
	}
#endif

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = PHYS_SECURE_PPI;

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							 "arm,no-tick-in-suspend");

	return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);

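/*
 * Probe a memory-mapped timer (CNTCTLBase plus per-frame registers):
 * prefer a frame with virtual capability, map it, grab its interrupt
 * and register it as a clockevent.
 */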
static int __init arch_timer_mem_init(struct device_node *np)
{
	struct device_node *frame, *best_frame = NULL;
	void __iomem *cntctlbase, *base;
	unsigned int irq, ret = -EINVAL;
	u32 cnttidr;

	arch_timers_present |= ARCH_MEM_TIMER;
	cntctlbase = of_iomap(np, 0);
	if (!cntctlbase) {
		pr_err("arch_timer: Can't find CNTCTLBase\n");
		return -ENXIO;
	}

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */
	for_each_available_child_of_node(np, frame) {
		int n;
		u32 cntacr;

		if (of_property_read_u32(frame, "frame-number", &n)) {
			pr_err("arch_timer: Missing frame-number\n");
			of_node_put(frame);
			goto out;
		}

		/* Try enabling everything, and see what sticks */
		cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
			 CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
		writel_relaxed(cntacr, cntctlbase + CNTACR(n));
		cntacr = readl_relaxed(cntctlbase + CNTACR(n));

		if ((cnttidr & CNTTIDR_VIRT(n)) &&
		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
			of_node_put(best_frame);
			best_frame = frame;
			arch_timer_mem_use_virtual = true;
			break;
		}

		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
			continue;

		of_node_put(best_frame);
		best_frame = of_node_get(frame);
	}

	ret = -ENXIO;
	base = arch_counter_base = of_io_request_and_map(best_frame, 0,
							 "arch_mem_timer");
	if (IS_ERR(base)) {
		pr_err("arch_timer: Can't map frame's registers\n");
		goto out;
	}

	if (arch_timer_mem_use_virtual)
		irq = irq_of_parse_and_map(best_frame, 1);
	else
		irq = irq_of_parse_and_map(best_frame, 0);

	ret = -EINVAL;
	if (!irq) {
		pr_err("arch_timer: Frame missing %s irq",
		       arch_timer_mem_use_virtual ? "virt" : "phys");
		goto out;
	}

	arch_timer_detect_rate(base, np);
	ret = arch_timer_mem_register(base, irq);
	if (ret)
		goto out;

	return arch_timer_common_init();
out:
	iounmap(cntctlbase);
	of_node_put(best_frame);
	return ret;
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
		       arch_timer_mem_init);

#ifdef CONFIG_ACPI
static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
{
	int trigger, polarity;

	if (!interrupt)
		return 0;

	trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
			: ACPI_LEVEL_SENSITIVE;

	polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
			: ACPI_ACTIVE_HIGH;

	return acpi_register_gsi(NULL, interrupt, trigger, polarity);
}

/* Initialize per-processor generic timer */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	struct acpi_table_gtdt *gtdt;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: already initialized, skipping\n");
		return -EINVAL;
	}

	gtdt = container_of(table, struct acpi_table_gtdt, header);

	arch_timers_present |= ARCH_CP15_TIMER;

	arch_timer_ppi[PHYS_SECURE_PPI] =
		map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
		gtdt->secure_el1_flags);

	arch_timer_ppi[PHYS_NONSECURE_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
		gtdt->non_secure_el1_flags);

	arch_timer_ppi[VIRT_PPI] =
		map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
		gtdt->virtual_timer_flags);

	arch_timer_ppi[HYP_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
		gtdt->non_secure_el2_flags);

	/* Get the frequency from CNTFRQ */
	arch_timer_detect_rate(NULL, NULL);

	/* Always-on capability */
	arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);

	arch_timer_init();
	return 0;
}
CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif