// SPDX-License-Identifier: GPL-2.0+
//
// An earlier version of this file appeared in the companion webpage for
// "Frightening small children and disconcerting grown-ups: Concurrency
// in the Linux kernel" by Alglave, Maranget, McKenney, Parri, and Stern,
// which appeared in ASPLOS 2018.

// ONCE
READ_ONCE(X)      __load{once}(X)
WRITE_ONCE(X,V)   { __store{once}(X,V); }
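// The {once} annotation models a volatile access: the compiler may not
// tear, fuse, or invent such loads and stores, but no ordering beyond
// this (and any dependencies) is implied.  Illustrative litmus-style
// use (flag and r0 are made-up names):
//
//	WRITE_ONCE(*flag, 1);      /* single untorn store, no barrier */
//	r0 = READ_ONCE(*flag);     /* single untorn load,  no barrier */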

// Release Acquire and friends
smp_store_release(X,V)   { __store{release}(*X,V); }
smp_load_acquire(X)      __load{acquire}(*X)
rcu_assign_pointer(X,V)  { __store{release}(X,V); }
rcu_dereference(X)       __load{once}(X)
smp_store_mb(X,V)        { __store{once}(X,V); __fence{mb}; }
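// A store-release pairs with a load-acquire to order message passing.
// Litmus-style sketch (x, y, r0, r1 are illustrative):
//
//	writer:  WRITE_ONCE(*x, 1); smp_store_release(y, 1);
//	reader:  r0 = smp_load_acquire(y); r1 = READ_ONCE(*x);
//
// LKMM forbids the outcome r0 == 1 && r1 == 0.  rcu_assign_pointer()
// and rcu_dereference() pair the same way for RCU-protected pointers.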

// Fences
smp_mb()                     { __fence{mb}; }
smp_rmb()                    { __fence{rmb}; }
smp_wmb()                    { __fence{wmb}; }
smp_mb__before_atomic()      { __fence{before-atomic}; }
smp_mb__after_atomic()       { __fence{after-atomic}; }
smp_mb__after_spinlock()     { __fence{after-spinlock}; }
smp_mb__after_unlock_lock()  { __fence{after-unlock-lock}; }
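// smp_mb() is a full barrier; smp_rmb() orders only reads and smp_wmb()
// only writes.  The remaining fences strengthen an adjacent operation,
// e.g. upgrading an otherwise unordered atomic to full ordering
// (v is an illustrative atomic_t pointer):
//
//	atomic_inc(v);
//	smp_mb__after_atomic();   /* the inc is now fully ordered */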

// Exchange
xchg(X,V)               __xchg{mb}(X,V)
xchg_relaxed(X,V)       __xchg{once}(X,V)
xchg_release(X,V)       __xchg{release}(X,V)
xchg_acquire(X,V)       __xchg{acquire}(X,V)
cmpxchg(X,V,W)          __cmpxchg{mb}(X,V,W)
cmpxchg_relaxed(X,V,W)  __cmpxchg{once}(X,V,W)
cmpxchg_acquire(X,V,W)  __cmpxchg{acquire}(X,V,W)
cmpxchg_release(X,V,W)  __cmpxchg{release}(X,V,W)
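// Both are atomic read-modify-writes returning the old value: xchg()
// unconditionally stores V into *X, while cmpxchg() stores W only if
// *X equals V.  The bare names imply full (mb) ordering on success;
// _relaxed, _acquire, and _release select weaker orderings.  Sketch
// (lockvar is an illustrative name):
//
//	if (cmpxchg(lockvar, 0, 1) == 0)
//		/* won the race: *lockvar was 0 and is now 1 */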

// Spinlocks
spin_lock(X)       { __lock(X); }
spin_unlock(X)     { __unlock(X); }
spin_trylock(X)    __trylock(X)
spin_is_locked(X)  __islocked(X)
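// spin_lock() has acquire semantics and spin_unlock() release semantics,
// so accesses cannot escape the critical section.  spin_trylock()
// returns nonzero on success without blocking, and spin_is_locked()
// reports the lock's current state.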

// RCU
rcu_read_lock()              { __fence{rcu-lock}; }
rcu_read_unlock()            { __fence{rcu-unlock}; }
synchronize_rcu()            { __fence{sync-rcu}; }
synchronize_rcu_expedited()  { __fence{sync-rcu}; }
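// synchronize_rcu() waits for all pre-existing readers; the expedited
// form is modeled identically because expediting changes latency, not
// ordering.  Sketch (p, q, old, newp are illustrative):
//
//	reader:   rcu_read_lock(); q = rcu_dereference(*p);
//	          /* ... dereference q ... */ rcu_read_unlock();
//	updater:  old = *p; rcu_assign_pointer(*p, newp);
//	          synchronize_rcu();  /* now safe to free old */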

// SRCU
srcu_read_lock(X)              __srcu{srcu-lock}(X)
srcu_read_unlock(X,Y)          { __srcu{srcu-unlock}(X,Y); }
synchronize_srcu(X)            { __srcu{sync-srcu}(X); }
synchronize_srcu_expedited(X)  { __srcu{sync-srcu}(X); }
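// SRCU is per-domain: srcu_read_lock(s) returns an index that must be
// passed back to srcu_read_unlock(s, idx), and synchronize_srcu(s)
// waits only for readers in the same srcu_struct (s, idx illustrative):
//
//	idx = srcu_read_lock(s);
//	/* ... read-side critical section ... */
//	srcu_read_unlock(s, idx);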

// Atomic
atomic_read(X)           READ_ONCE(*X)
atomic_set(X,V)          { WRITE_ONCE(*X,V); }
atomic_read_acquire(X)   smp_load_acquire(X)
atomic_set_release(X,V)  { smp_store_release(X,V); }
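// Plain atomic_read()/atomic_set() are simple once accesses to the
// counter; the _acquire/_release forms add the corresponding one-way
// ordering, exactly as for the non-atomic accessors they map to.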

atomic_add(V,X)  { __atomic_op(X,+,V); }
atomic_sub(V,X)  { __atomic_op(X,-,V); }
atomic_inc(X)    { __atomic_op(X,+,1); }
atomic_dec(X)    { __atomic_op(X,-,1); }
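// These void read-modify-writes are atomic but entirely unordered;
// combine them with smp_mb__before_atomic()/smp_mb__after_atomic()
// when ordering is required.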

atomic_add_return(V,X)          __atomic_op_return{mb}(X,+,V)
atomic_add_return_relaxed(V,X)  __atomic_op_return{once}(X,+,V)
atomic_add_return_acquire(V,X)  __atomic_op_return{acquire}(X,+,V)
atomic_add_return_release(V,X)  __atomic_op_return{release}(X,+,V)
atomic_fetch_add(V,X)           __atomic_fetch_op{mb}(X,+,V)
atomic_fetch_add_relaxed(V,X)   __atomic_fetch_op{once}(X,+,V)
atomic_fetch_add_acquire(V,X)   __atomic_fetch_op{acquire}(X,+,V)
atomic_fetch_add_release(V,X)   __atomic_fetch_op{release}(X,+,V)
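// Convention for this and the following blocks: _return variants yield
// the new value, _fetch variants the old value; the bare name is fully
// ordered (mb), _relaxed is unordered, and _acquire/_release are
// one-way ordered.  Sketch (v, old, new are illustrative):
//
//	new = atomic_add_return(5, v);  /* returns *v after the add  */
//	old = atomic_fetch_add(5, v);   /* returns *v before the add */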

atomic_inc_return(X)            __atomic_op_return{mb}(X,+,1)
atomic_inc_return_relaxed(X)    __atomic_op_return{once}(X,+,1)
atomic_inc_return_acquire(X)    __atomic_op_return{acquire}(X,+,1)
atomic_inc_return_release(X)    __atomic_op_return{release}(X,+,1)
atomic_fetch_inc(X)             __atomic_fetch_op{mb}(X,+,1)
atomic_fetch_inc_relaxed(X)     __atomic_fetch_op{once}(X,+,1)
atomic_fetch_inc_acquire(X)     __atomic_fetch_op{acquire}(X,+,1)
atomic_fetch_inc_release(X)     __atomic_fetch_op{release}(X,+,1)

atomic_sub_return(V,X)          __atomic_op_return{mb}(X,-,V)
atomic_sub_return_relaxed(V,X)  __atomic_op_return{once}(X,-,V)
atomic_sub_return_acquire(V,X)  __atomic_op_return{acquire}(X,-,V)
atomic_sub_return_release(V,X)  __atomic_op_return{release}(X,-,V)
atomic_fetch_sub(V,X)           __atomic_fetch_op{mb}(X,-,V)
atomic_fetch_sub_relaxed(V,X)   __atomic_fetch_op{once}(X,-,V)
atomic_fetch_sub_acquire(V,X)   __atomic_fetch_op{acquire}(X,-,V)
atomic_fetch_sub_release(V,X)   __atomic_fetch_op{release}(X,-,V)

atomic_dec_return(X)            __atomic_op_return{mb}(X,-,1)
atomic_dec_return_relaxed(X)    __atomic_op_return{once}(X,-,1)
atomic_dec_return_acquire(X)    __atomic_op_return{acquire}(X,-,1)
atomic_dec_return_release(X)    __atomic_op_return{release}(X,-,1)
atomic_fetch_dec(X)             __atomic_fetch_op{mb}(X,-,1)
atomic_fetch_dec_relaxed(X)     __atomic_fetch_op{once}(X,-,1)
atomic_fetch_dec_acquire(X)     __atomic_fetch_op{acquire}(X,-,1)
atomic_fetch_dec_release(X)     __atomic_fetch_op{release}(X,-,1)

atomic_xchg(X,V)                __xchg{mb}(X,V)
atomic_xchg_relaxed(X,V)        __xchg{once}(X,V)
atomic_xchg_release(X,V)        __xchg{release}(X,V)
atomic_xchg_acquire(X,V)        __xchg{acquire}(X,V)
atomic_cmpxchg(X,V,W)           __cmpxchg{mb}(X,V,W)
atomic_cmpxchg_relaxed(X,V,W)   __cmpxchg{once}(X,V,W)
atomic_cmpxchg_acquire(X,V,W)   __cmpxchg{acquire}(X,V,W)
atomic_cmpxchg_release(X,V,W)   __cmpxchg{release}(X,V,W)
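// The atomic_xchg()/atomic_cmpxchg() family mirrors the bare
// xchg()/cmpxchg() operations above, applied to an atomic_t, with the
// same ordering suffixes.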

atomic_sub_and_test(V,X)  __atomic_op_return{mb}(X,-,V) == 0
atomic_dec_and_test(X)    __atomic_op_return{mb}(X,-,1) == 0
atomic_inc_and_test(X)    __atomic_op_return{mb}(X,+,1) == 0
atomic_add_negative(V,X)  __atomic_op_return{mb}(X,+,V) < 0
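// These conditional tests are fully ordered RMWs returning a boolean:
// atomic_dec_and_test() returns true iff the new value is zero, the
// classic refcount-drop idiom (obj, refcnt, free_object illustrative):
//
//	if (atomic_dec_and_test(&obj->refcnt))
//		free_object(obj);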