/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)	{ (i) }

/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
static inline long arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static inline void arch_atomic64_set(atomic64_t *v, long i)
{
	WRITE_ONCE(v->counter, i);
}
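
/*
 * Usage sketch (illustrative only; kernel code would normally go through
 * the atomic64_*() wrappers rather than call arch_*() directly): aligned
 * 64-bit loads and stores are atomic on x86-64, so read/set compile to
 * plain MOVs. READ_ONCE()/WRITE_ONCE() only stop the compiler from
 * tearing, fusing or reordering the access; no CPU memory barrier is
 * implied.
 *
 *	static atomic64_t events = ATOMIC64_INIT(0);
 *
 *	arch_atomic64_set(&events, 0);			// atomic store
 *	long snap = arch_atomic64_read(&events);	// atomic load
 */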

/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic64_add(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "addq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter));
}

/**
 * arch_atomic64_sub - subtract integer from atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void arch_atomic64_sub(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "subq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter));
}

/**
 * arch_atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
}
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
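
/*
 * Rough sketch of what the GEN_BINARY_RMWcc() expansion amounts to (an
 * assumption for illustration; the real macro lives in <asm/rmwcc.h> and
 * uses asm goto or CC_SET()/CC_OUT() depending on compiler support):
 *
 *	bool result;
 *	asm volatile(LOCK_PREFIX "subq %2,%0"
 *		     : "+m" (v->counter), "=@cce" (result)
 *		     : "er" (i) : "memory");
 *	return result;
 *
 * i.e. the zero flag computed by the locked SUBQ is handed back directly,
 * with no separate compare instruction.
 */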

/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "incq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}
#define arch_atomic64_inc arch_atomic64_inc

/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "decq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}
#define arch_atomic64_dec arch_atomic64_dec

/**
 * arch_atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
}
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
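
/*
 * Typical use (an illustrative sketch, not an API defined here): the
 * inc/dec_and_test pair implements the classic reference-count pattern.
 * Exactly one thread, the one that drops the last reference, sees "true"
 * and may release the object:
 *
 *	struct obj { atomic64_t refs; };
 *
 *	void obj_put(struct obj *o)
 *	{
 *		if (arch_atomic64_dec_and_test(&o->refs))
 *			kfree(o);
 *	}
 */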

/**
 * arch_atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
}
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test

/**
 * arch_atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
}
#define arch_atomic64_add_negative arch_atomic64_add_negative
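
/*
 * Here the "s" condition returns the sign flag set by the locked ADDQ,
 * so the caller learns whether the new value is negative without
 * re-reading the counter. Hypothetical sketch (pool/throttle are made-up
 * names):
 *
 *	// charge `bytes` against a budget that may go negative
 *	if (arch_atomic64_add_negative(-bytes, &pool->budget))
 *		throttle(pool);
 */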

/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns the new value of @v.
 */
static __always_inline long arch_atomic64_add_return(long i, atomic64_t *v)
{
	return i + xadd(&v->counter, i);
}
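
/*
 * xadd() is exchange-and-add: it atomically adds @i to the memory operand
 * and returns the value the counter held *before* the addition, so the
 * "i +" above turns the old value into the new one. Worked example: with
 * v->counter == 5, arch_atomic64_add_return(3, v) stores 8 and returns
 * 3 + 5 == 8.
 */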

static inline long arch_atomic64_sub_return(long i, atomic64_t *v)
{
	return arch_atomic64_add_return(-i, v);
}

static inline long arch_atomic64_fetch_add(long i, atomic64_t *v)
{
	return xadd(&v->counter, i);
}

static inline long arch_atomic64_fetch_sub(long i, atomic64_t *v)
{
	return xadd(&v->counter, -i);
}
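
/*
 * Naming convention: the fetch_*() forms return the value the counter
 * held before the operation, while the *_return() forms return the value
 * after it. With v->counter == 5, arch_atomic64_fetch_add(3, v) returns 5
 * while arch_atomic64_add_return(3, v) returns 8.
 */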

static inline long arch_atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
	return arch_cmpxchg(&v->counter, old, new);
}

#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new)
{
	return try_cmpxchg(&v->counter, old, new);
}
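
/*
 * try_cmpxchg() differs from cmpxchg() in that it returns a success bool
 * and, on failure, writes the value it found back through @old, so retry
 * loops need no explicit reload. Illustrative sketch of such a loop (an
 * atomic maximum; not an operation defined by this header):
 *
 *	static inline void atomic64_max_sketch(atomic64_t *v, s64 new)
 *	{
 *		s64 old = arch_atomic64_read(v);
 *
 *		do {
 *			if (old >= new)
 *				return;
 *		} while (!arch_atomic64_try_cmpxchg(v, &old, new));
 *	}
 */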

static inline long arch_atomic64_xchg(atomic64_t *v, long new)
{
	return arch_xchg(&v->counter, new);
}

static inline void arch_atomic64_and(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "andq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

static inline long arch_atomic64_fetch_and(long i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
	return val;
}
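
/*
 * The empty do { } while body above is deliberate: all the work happens
 * in the loop condition. On each failed attempt, try_cmpxchg() refreshes
 * @val with the current counter value, so the loop keeps proposing
 * "val & i" against the latest value until the exchange succeeds, then
 * returns the pre-operation value. fetch_or()/fetch_xor() below follow
 * the same pattern.
 */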

static inline void arch_atomic64_or(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "orq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

static inline long arch_atomic64_fetch_or(long i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
	return val;
}

static inline void arch_atomic64_xor(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "xorq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

static inline long arch_atomic64_fetch_xor(long i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
	return val;
}

#endif /* _ASM_X86_ATOMIC64_64_H */