/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
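
/*
 * Illustrative sketch (not part of the original header; the descriptor
 * layout and field names are hypothetical): wmb() orders two writes so
 * that a device polling memory never observes the "valid" flag before
 * the payload it guards.
 *
 *	desc->addr  = buf_dma;
 *	desc->len   = buf_len;
 *	wmb();				// payload visible before the flag
 *	desc->flags = DESC_VALID;	// device may now consume the descriptor
 */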

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 *	bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *	0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
		unsigned long size)
{
	unsigned long mask;

	asm volatile ("cmp %1,%2; sbb %0,%0;"
			:"=r" (mask)
			:"g"(size),"r" (index)
			:"cc");
	return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec
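
/*
 * Usage sketch (illustrative only; "table" and "idx" are hypothetical):
 * the generic array_index_nospec() helper in <linux/nospec.h> ANDs this
 * mask into the index, so a mispredicted bounds check speculatively
 * accesses element 0 instead of out-of-bounds memory.
 *
 *	if (idx < ARRAY_SIZE(table)) {
 *		idx = array_index_nospec(idx, ARRAY_SIZE(table));
 *		val = table[idx];
 *	}
 */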

/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \
					   "lfence", X86_FEATURE_LFENCE_RDTSC)
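
/*
 * Usage sketch (illustrative; names are hypothetical): barrier_nospec()
 * sits between a bounds or permission check and the dependent access,
 * so the access cannot be issued speculatively before the check resolves.
 *
 *	if (offset >= limit)
 *		return -EINVAL;
 *	barrier_nospec();		// no speculative loads past this point
 *	val = buffer[offset];
 */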

#define dma_rmb()	barrier()
#define dma_wmb()	barrier()

#ifdef CONFIG_X86_32
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
#else
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
#endif
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
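
/*
 * Illustrative pairing (variable names hypothetical) of the release/acquire
 * wrappers built on the definitions above.  Producer:
 *
 *	data = 42;
 *	smp_store_release(&ready, 1);
 *
 * Consumer:
 *
 *	while (!smp_load_acquire(&ready))
 *		cpu_relax();
 *	r = data;	// guaranteed to observe 42
 *
 * On x86 both sides compile to a plain access plus a compiler barrier,
 * since the hardware already provides acquire/release ordering for
 * ordinary loads and stores.
 */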

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()
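
/*
 * Usage sketch (hypothetical names): on x86 these expand to a compiler
 * barrier only, because LOCK-prefixed read-modify-write instructions are
 * already fully ordered.
 *
 *	set_bit(FLAG_READY, &obj->flags);
 *	smp_mb__after_atomic();		// order the RMW against the check below
 *	if (waitqueue_active(&obj->wq))
 *		wake_up(&obj->wq);
 */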

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */