/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SYNC_BITOPS_H
#define _ASM_X86_SYNC_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All test_and_* operations return 0 if the
 * bit was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit BITS_PER_LONG is the LSB of (addr+1).
 */
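
/*
 * Illustrative sketch of the numbering (hypothetical bitmap, not part
 * of this file's API):
 *
 *	static unsigned long bitmap[2];
 *
 * sync_set_bit(0, bitmap) sets the LSB of bitmap[0], and
 * sync_set_bit(BITS_PER_LONG, bitmap) sets the LSB of bitmap[1].
 */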

#include <asm/rmwcc.h>

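/*
 * ADDR gives the bit-op asms a read/write ("+m") operand at 'addr';
 * since @nr may index past that first long, a "memory" clobber tells
 * the compiler the whole bitmap may change.
 */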
#define ADDR (*(volatile long *)addr)

/**
 * sync_set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void sync_set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("lock; " __ASM_SIZE(bts) " %1,%0"
		     : "+m" (ADDR)
		     : "Ir" (nr)
		     : "memory");
}
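
/*
 * Sketch of intended use (illustrative only; 'port' and 'pending' are
 * hypothetical, e.g. a bitmap shared with a hypervisor):
 *
 *	sync_set_bit(port, pending);
 *
 * performs a locked RMW that is safe against other agents, whereas
 * __set_bit(port, pending) would be a plain, non-atomic RMW.
 */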

/**
 * sync_clear_bit - Clear a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * sync_clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("lock; " __ASM_SIZE(btr) " %1,%0"
		     : "+m" (ADDR)
		     : "Ir" (nr)
		     : "memory");
}
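
/*
 * Unlock-style sketch (illustrative; BUSY_BIT and 'flags' are
 * hypothetical), ordering prior stores before the bit is dropped, as
 * the comment above prescribes:
 *
 *	smp_mb__before_atomic();
 *	sync_clear_bit(BUSY_BIT, flags);
 */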

/**
 * sync_change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * sync_change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void sync_change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("lock; " __ASM_SIZE(btc) " %1,%0"
		     : "+m" (ADDR)
		     : "Ir" (nr)
		     : "memory");
}
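
/*
 * Illustrative use (hypothetical names): atomically flip a state bit
 * that another agent may be updating concurrently:
 *
 *	sync_change_bit(STATE_BIT, flags);
 */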

/**
 * sync_test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline bool sync_test_and_set_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(bts), *addr, c, "Ir", nr);
}
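
/*
 * Try-lock sketch (illustrative, hypothetical names): the locked bts
 * returns the old bit, so exactly one caller observes 0:
 *
 *	if (!sync_test_and_set_bit(LOCK_BIT, flags))
 *		do_locked_work();
 */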

/**
 * sync_test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline bool sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btr), *addr, c, "Ir", nr);
}
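
/*
 * Consume-a-pending-flag sketch (illustrative, hypothetical names):
 * only one caller can observe the 1 -> 0 transition:
 *
 *	if (sync_test_and_clear_bit(port, pending))
 *		handle_event(port);
 */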

/**
 * sync_test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline bool sync_test_and_change_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btc), *addr, c, "Ir", nr);
}

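/*
 * A plain test_bit() suffices for reads: an aligned load is atomic on
 * x86, so no lock prefix is needed to test a bit.
 */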
#define sync_test_bit(nr, addr) test_bit(nr, addr)

#undef ADDR

#endif /* _ASM_X86_SYNC_BITOPS_H */