blob: dfd05938a3cb324d743dffeee57542ba4219d16a [file] [log] [blame]
Suman Annaeebba712018-05-11 12:03:16 -05001/* SPDX-License-Identifier: GPL-2.0 */
Ohad Ben-Cohenbd9a4c72011-02-17 09:52:03 -08002/*
3 * Hardware spinlock public header
4 *
5 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
6 *
7 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
Ohad Ben-Cohenbd9a4c72011-02-17 09:52:03 -08008 */
9
10#ifndef __LINUX_HWSPINLOCK_H
11#define __LINUX_HWSPINLOCK_H
12
13#include <linux/err.h>
14#include <linux/sched.h>
15
16/* hwspinlock mode argument */
17#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
18#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
Baolin Wang1e6c06a2018-04-08 11:06:57 +080019#define HWLOCK_RAW 0x03
Ohad Ben-Cohenbd9a4c72011-02-17 09:52:03 -080020
Paul Gortmaker313162d2012-01-30 11:46:54 -050021struct device;
Suman Annafb7737e2015-03-04 20:01:14 -060022struct device_node;
Ohad Ben-Cohenbd9a4c72011-02-17 09:52:03 -080023struct hwspinlock;
Ohad Ben-Cohen300bab92011-09-06 15:39:21 +030024struct hwspinlock_device;
25struct hwspinlock_ops;
Ohad Ben-Cohenbd9a4c72011-02-17 09:52:03 -080026
/**
 * struct hwspinlock_pdata - platform data for hwspinlock drivers
 * @base_id: base id for this hwspinlock device
 *
 * hwspinlock devices provide system-wide hardware locks that are used
 * by remote processors that have no other way to achieve synchronization.
 *
 * To achieve that, each physical lock must have a system-wide id number
 * that is agreed upon, otherwise remote processors can't possibly assume
 * they're using the same hardware lock.
 *
 * Usually boards have a single hwspinlock device, which provides several
 * hwspinlocks, and in this case, they can be trivially numbered 0 to
 * (num-of-locks - 1).
 *
 * In case boards have several hwspinlocks devices, a different base id
 * should be used for each hwspinlock device (they can't all use 0 as
 * a starting id!).
 *
 * This platform data structure should be used to provide the base id
 * for each device (which is trivially 0 when only a single hwspinlock
 * device exists). It can be shared between different platforms, hence
 * its location.
 */
struct hwspinlock_pdata {
	int base_id;
};
54
Ohad Ben-Cohenbd9a4c72011-02-17 09:52:03 -080055#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE)
56
Ohad Ben-Cohen300bab92011-09-06 15:39:21 +030057int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
58 const struct hwspinlock_ops *ops, int base_id, int num_locks);
59int hwspin_lock_unregister(struct hwspinlock_device *bank);
Ohad Ben-Cohenbd9a4c72011-02-17 09:52:03 -080060struct hwspinlock *hwspin_lock_request(void);
61struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
62int hwspin_lock_free(struct hwspinlock *hwlock);
Suman Annafb7737e2015-03-04 20:01:14 -060063int of_hwspin_lock_get_id(struct device_node *np, int index);
Ohad Ben-Cohenbd9a4c72011-02-17 09:52:03 -080064int hwspin_lock_get_id(struct hwspinlock *hwlock);
65int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
66 unsigned long *);
67int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
68void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
Baolin Wang5560f702018-06-22 16:08:58 +080069int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
Baolin Wang4f1acd72018-06-22 16:08:59 +080070int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
71struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
72struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
73 unsigned int id);
Ohad Ben-Cohenbd9a4c72011-02-17 09:52:03 -080074
75#else /* !CONFIG_HWSPINLOCK */
76
77/*
78 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
79 * enabled. We prefer to silently succeed in this case, and let the
80 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
81 * required on a given setup, users will still work.
82 *
83 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with which
84 * we _do_ want users to fail (no point in registering hwspinlock instances if
85 * the framework is not available).
86 *
87 * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
88 * users. Others, which care, can still check this with IS_ERR.
89 */
/*
 * Dummy request: ERR_PTR(-ENODEV) is not NULL, so callers that only
 * NULL-check the result proceed as if a lock was granted.
 */
static inline struct hwspinlock *hwspin_lock_request(void)
{
	return ERR_PTR(-ENODEV);
}
94
/*
 * Dummy request-specific: ERR_PTR(-ENODEV) is not NULL, so callers that
 * only NULL-check the result proceed as if a lock was granted.
 */
static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	return ERR_PTR(-ENODEV);
}
99
/* Dummy free: silently succeed so users still work without the framework */
static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
	return 0;
}
104
/* Dummy lock-with-timeout: silently "succeed" so the locked path compiles away */
static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
			  int mode, unsigned long *flags)
{
	return 0;
}
111
/* Dummy trylock: silently "succeed" so the locked path compiles away */
static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	return 0;
}
117
/* Dummy unlock: nothing was taken, so there is nothing to release */
static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}
122
/* Dummy DT id lookup: report id 0 so callers keep working */
static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	return 0;
}
127
/* Dummy id query: report id 0 so callers keep working */
static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	return 0;
}
132
/* Dummy DT by-name id lookup: report id 0 so callers keep working */
static inline
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	return 0;
}
138
/* Dummy managed free: silently succeed so users still work */
static inline
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	return 0;
}
144
/*
 * Dummy managed request: ERR_PTR(-ENODEV) is not NULL, so callers that
 * only NULL-check the result proceed as if a lock was granted.
 */
static inline struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
	return ERR_PTR(-ENODEV);
}
149
/*
 * Dummy managed request-specific: ERR_PTR(-ENODEV) is not NULL, so callers
 * that only NULL-check the result proceed as if a lock was granted.
 */
static inline
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	return ERR_PTR(-ENODEV);
}
156
Ohad Ben-Cohenbd9a4c72011-02-17 09:52:03 -0800157#endif /* !CONFIG_HWSPINLOCK */
158
/**
 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 * @flags: a pointer to where the caller's interrupt state will be saved at
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (previous interrupts state is saved at @flags),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}
180
/**
 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}
199
/**
 * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Caution: the caller must protect its lock-taking path with a mutex or
 * spinlock of its own to avoid deadlock; in return, the caller may perform
 * time-consuming or sleepable operations while holding the hardware lock.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
}
218
/**
 * hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible. This is required in order to minimize remote cores
 * polling on the hardware interconnect.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, 0, NULL);
}
238
/**
 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @flags: a pointer to where the caller's interrupt state will be saved at
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled (plus previous interrupt state is saved), so the caller must
 * not sleep, and is advised to release the hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
				unsigned int to, unsigned long *flags)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}
262
/**
 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}
285
/**
 * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Caution: the caller must protect its lock-taking path with a mutex or
 * spinlock of its own to avoid deadlock; in return, the caller may perform
 * time-consuming or sleepable operations while holding the hardware lock.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
}
308
/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}
333
/**
 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @flags: previous caller's interrupt state to restore
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * restore the previous state of the local interrupts. It should be used
 * to undo, e.g., hwspin_trylock_irqsave().
 *
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 */
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
					    unsigned long *flags)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}
351
/**
 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * enable local interrupts. Should be used to undo hwspin_trylock_irq()
 * or hwspin_lock_timeout_irq().
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}
367
/**
 * hwspin_unlock_raw() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_raw()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
}
382
/**
 * hwspin_unlock() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock and enable preemption
 * back.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, 0, NULL);
}
398
399#endif /* __LINUX_HWSPINLOCK_H */