/*
 * arch/arm/include/asm/mcpm.h
 *
 * Created by:  Nicolas Pitre, April 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef MCPM_H
#define MCPM_H

/*
 * Maximum number of possible clusters / CPUs per cluster.
 *
 * This should be sufficient for quite a while, while keeping the
 * (assembly) code simpler.  When this starts to grow then we'll have
 * to consider dynamic allocation.
 */
#define MAX_CPUS_PER_CLUSTER	4
#define MAX_NR_CLUSTERS		2

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <asm/cacheflush.h>

/*
 * Platform specific code should use this symbol to set up secondary
 * entry location for processors to use when released from reset.
 */
extern void mcpm_entry_point(void);

/*
 * This is used to indicate where the given CPU from given cluster should
 * branch once it is ready to re-enter the kernel using ptr, or NULL if it
 * should be gated.  A gated CPU is held in a WFE loop until its vector
 * becomes non-NULL.
 */
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);

/*
 * This sets an early poke, i.e. a value to be poked into some address
 * from very early assembly code before the CPU is ungated.  The
 * address must be physical, and if 0 then nothing will happen.
 */
void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
		unsigned long poke_phys_addr, unsigned long poke_val);
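
/*
 * Illustrative sketch only: an early poke is typically used to have the
 * low-level entry code write a wake-up value into a platform mailbox or
 * power-controller register before the CPU is ungated.  The register
 * address below is purely hypothetical:
 *
 *	mcpm_set_early_poke(cpu, cluster, PLAT_WAKE_MBOX_PHYS, 1);
 */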

/*
 * CPU/cluster power operations API for higher subsystems to use.
 */

/**
 * mcpm_cpu_power_up - make given CPU in given cluster runnable
 *
 * @cpu: CPU number within given cluster
 * @cluster: cluster number for the CPU
 *
 * The identified CPU is brought out of reset.  If the cluster was powered
 * down then it is brought up as well, taking care not to let the other CPUs
 * in the cluster run, and ensuring appropriate cluster setup.
 *
 * Caller must ensure the appropriate entry vector is initialized with
 * mcpm_set_entry_vector() prior to calling this.
 *
 * This must be called in a sleepable context.  However, the implementation
 * is strongly encouraged to return early and let the operation happen
 * asynchronously, especially when significant delays are expected.
 *
 * If the operation cannot be performed then an error code is returned.
 */
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
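
/*
 * Illustrative sketch (loosely following the generic MCPM SMP glue, not a
 * definitive implementation): bringing a secondary CPU online pairs
 * mcpm_set_entry_vector() with mcpm_cpu_power_up().  The MPIDR helpers and
 * secondary_startup are the usual ARM kernel facilities; treat the details
 * as assumptions made for the example:
 *
 *	unsigned int mpidr = cpu_logical_map(cpu);
 *	unsigned int pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 *	unsigned int pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 *	int ret;
 *
 *	mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
 *	ret = mcpm_cpu_power_up(pcpu, pcluster);
 *	if (ret)
 *		return ret;
 */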

/**
 * mcpm_cpu_power_down - power the calling CPU down
 *
 * The calling CPU is powered down.
 *
 * If this CPU is found to be the "last man standing" in the cluster
 * then the cluster is prepared for power-down too.
 *
 * This must be called with interrupts disabled.
 *
 * On success this does not return.  Re-entry in the kernel is expected
 * via mcpm_entry_point.
 *
 * This will return if mcpm_platform_register() has not been called
 * previously, in which case the caller should take appropriate action.
 *
 * On success, the CPU is not guaranteed to be truly halted until
 * mcpm_wait_for_cpu_powerdown() subsequently returns zero for the
 * specified cpu.  Until then, other CPUs should make sure they do not
 * trash memory the target CPU might be executing/accessing.
 */
void mcpm_cpu_power_down(void);
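
/*
 * Illustrative sketch of a CPU hotplug "die" path, assuming it runs on the
 * CPU being removed with interrupts already disabled.  The entry vector is
 * cleared so the CPU stays gated should it ever fall through, then the CPU
 * powers itself down (mcpm_cpu_power_down() does not return on success):
 *
 *	unsigned int mpidr = read_cpuid_mpidr();
 *	unsigned int pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 *	unsigned int pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 *
 *	mcpm_set_entry_vector(pcpu, pcluster, NULL);
 *	mcpm_cpu_power_down();
 */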

/**
 * mcpm_wait_for_cpu_powerdown - wait for a specified CPU to halt, and
 *	make sure it is powered off
 *
 * @cpu: CPU number within given cluster
 * @cluster: cluster number for the CPU
 *
 * Call this function to ensure that a pending powerdown has taken
 * effect and the CPU is safely parked before performing non-mcpm
 * operations that may affect the CPU (such as kexec trashing the
 * kernel text).
 *
 * It is *not* necessary to call this function if you only need to
 * serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup
 * event.
 *
 * Do not call this function unless the specified CPU has already
 * called mcpm_cpu_power_down() or has committed to doing so.
 *
 * @return:
 *	- zero if the CPU is in a safely parked state
 *	- nonzero otherwise (e.g., timeout)
 */
int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
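
/*
 * Illustrative sketch: before doing anything that may trash memory the
 * dying CPU could still be touching (a kexec-style "CPU kill" check, for
 * instance), the caller confirms the powerdown has completed:
 *
 *	if (mcpm_wait_for_cpu_powerdown(pcpu, pcluster))
 *		pr_warn("CPU %u in cluster %u failed to park\n",
 *			pcpu, pcluster);
 */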

/**
 * mcpm_cpu_suspend - bring the calling CPU into a suspended state
 *
 * @expected_residency: duration in microseconds the CPU is expected
 *			to remain suspended, or 0 if unknown/infinity.
 *
 * The calling CPU is suspended.  The expected residency argument is used
 * as a hint by the platform specific backend to implement the appropriate
 * sleep state level according to the knowledge it has on wake-up latency
 * for the given hardware.
 *
 * If this CPU is found to be the "last man standing" in the cluster
 * then the cluster may be prepared for power-down too, if the expected
 * residency makes it worthwhile.
 *
 * This must be called with interrupts disabled.
 *
 * On success this does not return.  Re-entry in the kernel is expected
 * via mcpm_entry_point.
 *
 * This will return if mcpm_platform_register() has not been called
 * previously, in which case the caller should take appropriate action.
 */
void mcpm_cpu_suspend(u64 expected_residency);
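
/*
 * Illustrative sketch of a cpuidle/suspend finisher, assuming interrupts
 * are already disabled and that cpu_resume (the standard ARM resume entry)
 * is a suitable re-entry point for the platform:
 *
 *	mcpm_set_entry_vector(pcpu, pcluster, cpu_resume);
 *	mcpm_cpu_suspend(expected_residency_us);
 *	(control only comes back here if no backend was registered)
 */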

/**
 * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
 *
 * This lets the platform specific backend code perform needed housekeeping
 * work.  This must be called by the newly activated CPU as soon as it is
 * fully operational in kernel space, before it enables interrupts.
 *
 * If the operation cannot be performed then an error code is returned.
 */
int mcpm_cpu_powered_up(void);
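
/*
 * Illustrative sketch: the freshly booted CPU calls this from its secondary
 * init path, once it is fully running in kernel space but before it enables
 * interrupts:
 *
 *	if (mcpm_cpu_powered_up())
 *		pr_warn("mcpm_cpu_powered_up() failed\n");
 */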

/*
 * Platform specific methods used in the implementation of the above API.
 */
struct mcpm_platform_ops {
	int (*power_up)(unsigned int cpu, unsigned int cluster);
	void (*power_down)(void);
	int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
	void (*suspend)(u64);
	void (*powered_up)(void);
};

/**
 * mcpm_platform_register - register platform specific power methods
 *
 * @ops: mcpm_platform_ops structure to register
 *
 * An error is returned if the registration has been done previously.
 */
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
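
/*
 * Illustrative sketch of a platform backend registering its methods.  The
 * my_* callbacks are hypothetical and stand in for the platform's own power
 * controller code:
 *
 *	static const struct mcpm_platform_ops my_power_ops = {
 *		.power_up		= my_power_up,
 *		.power_down		= my_power_down,
 *		.wait_for_powerdown	= my_wait_for_powerdown,
 *		.suspend		= my_suspend,
 *		.powered_up		= my_powered_up,
 *	};
 *
 *	ret = mcpm_platform_register(&my_power_ops);
 */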

/* Synchronisation structures for coordinating safe cluster setup/teardown: */

/*
 * When modifying this structure, make sure you update the MCPM_SYNC_ defines
 * to match.
 */
struct mcpm_sync_struct {
	/* individual CPU states */
	struct {
		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
	} cpus[MAX_CPUS_PER_CLUSTER];

	/* cluster state */
	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);

	/* inbound-side state */
	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
};

struct sync_struct {
	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
};

extern unsigned long sync_phys;	/* physical address of *mcpm_sync */

void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
int __mcpm_cluster_state(unsigned int cluster);
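
/*
 * Rough sketch of the intended call ordering on the outbound (power-down)
 * side.  Locking, cache maintenance and the actual "last man" test are
 * omitted, so this is an illustration of the state machine rather than a
 * complete recipe:
 *
 *	__mcpm_cpu_going_down(cpu, cluster);
 *	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
 *		(tear the cluster down)
 *		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
 *	}
 *	__mcpm_cpu_down(cpu, cluster);
 */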

int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level));

void __init mcpm_smp_set_ops(void);
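
/*
 * Illustrative init-time sequence for a platform using MCPM-based SMP
 * bring-up; my_power_ops and my_power_up_setup are hypothetical platform
 * code:
 *
 *	ret = mcpm_platform_register(&my_power_ops);
 *	if (!ret)
 *		ret = mcpm_sync_init(my_power_up_setup);
 *	if (!ret)
 *		mcpm_smp_set_ops();
 */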

#else

/*
 * asm-offsets.h causes trouble when included in .c files, and cacheflush.h
 * cannot be included in asm files.  Let's work around the conflict like this.
 */
#include <asm/asm-offsets.h>
#define __CACHE_WRITEBACK_GRANULE	CACHE_WRITEBACK_GRANULE

#endif /* ! __ASSEMBLY__ */

/* Definitions for mcpm_sync_struct */
#define CPU_DOWN		0x11
#define CPU_COMING_UP		0x12
#define CPU_UP			0x13
#define CPU_GOING_DOWN		0x14

#define CLUSTER_DOWN		0x21
#define CLUSTER_UP		0x22
#define CLUSTER_GOING_DOWN	0x23

#define INBOUND_NOT_COMING_UP	0x31
#define INBOUND_COMING_UP	0x32

/*
 * Offsets for the mcpm_sync_struct members, for use in asm.
 * We don't want to make them global to the kernel via asm-offsets.c.
 */
#define MCPM_SYNC_CLUSTER_CPUS	0
#define MCPM_SYNC_CPU_SIZE	__CACHE_WRITEBACK_GRANULE
#define MCPM_SYNC_CLUSTER_CLUSTER \
	(MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
#define MCPM_SYNC_CLUSTER_INBOUND \
	(MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
#define MCPM_SYNC_CLUSTER_SIZE \
	(MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)
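
/*
 * For reference, with the layout above the per-CPU state byte for a given
 * (cpu, cluster) pair lives at:
 *
 *	sync_phys + cluster * MCPM_SYNC_CLUSTER_SIZE
 *		  + MCPM_SYNC_CLUSTER_CPUS + cpu * MCPM_SYNC_CPU_SIZE
 *
 * which is the address arithmetic the low-level assembly code is expected
 * to perform.
 */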

#endif