/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
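/*
 * Usage sketch (illustrative, not part of the original header): a file
 * that must not be branch-traced opts out before any includes, e.g.:
 *
 *	#define DISABLE_BRANCH_PROFILING
 *	#include <linux/kernel.h>
 */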
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section(_ftrace_annotated_branch)	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif
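
/*
 * Usage sketch (illustrative; fast_path()/slow_path() are hypothetical
 * helpers, not kernel APIs): hint the expected direction of a branch so
 * the compiler lays out the common case as straight-line code, while the
 * profiler records whether the hint was actually right:
 *
 *	if (likely(p != NULL))
 *		fast_path(p);
 *	else
 *		slow_path();
 */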

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section(_ftrace_branch)		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})
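
/*
 * Expansion sketch (illustrative): with CONFIG_PROFILE_ALL_BRANCHES, the
 * if() macro above turns
 *
 *	if (x > 0)
 *
 * into
 *
 *	if (__trace_if_var(!!(x > 0)))
 *
 * so every if () in the kernel bumps a hit/miss counter in the
 * _ftrace_branch section, while compile-time-constant conditions bypass
 * the counter via __builtin_constant_p().
 */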

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
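
/*
 * Usage sketch (illustrative): barrier_data() is typically used to keep
 * dead-store elimination from dropping a memset() that scrubs secrets
 * off the stack:
 *
 *	char key[32];
 *	...
 *	memset(key, 0, sizeof(key));
 *	barrier_data(key);
 *
 * After barrier_data(key), the compiler must assume key is still read,
 * so the memset() cannot be optimized away.
 */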

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(.rodata..c_jump_table)

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
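
/*
 * Usage sketch (illustrative): mark a path that cannot be taken, so the
 * compiler emits no code for it and objtool does not warn about a
 * missing return:
 *
 *	int mod4(int x)
 *	{
 *		switch (x & 3) {
 *		case 0: return 0;
 *		case 1: return 1;
 *		case 2: return 2;
 *		case 3: return 3;
 *		}
 *		unreachable();
 *	}
 */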

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((__section__("___kentry+" #sym)))		\
	= (unsigned long)&sym;
#endif
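
/*
 * Usage sketch (illustrative; my_vector_entry is a hypothetical symbol):
 * keep a function that is only referenced from assembly or a vector
 * table from being dropped by --gc-sections:
 *
 *	void my_vector_entry(void);
 *	KENTRY(my_vector_entry);
 */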

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
({ unsigned long __ptr;						\
   __ptr = (unsigned long) (ptr);				\
  (typeof(ptr)) (__ptr + (off)); })
#endif
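
/*
 * Sketch (illustrative; foo_base and __my_offset are hypothetical):
 * RELOC_HIDE() launders a pointer through an unsigned long so the
 * compiler cannot assume the result still points into the original
 * object, e.g. when shifting a base address by a per-CPU offset:
 *
 *	struct foo *p = RELOC_HIDE(&foo_base, __my_offset);
 */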

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
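
/*
 * Usage sketch (illustrative): hide a value's data-flow so the compiler
 * cannot fold the computation away, e.g. in a constant-time comparison:
 *
 *	int diff = 0, i;
 *
 *	for (i = 0; i < len; i++)
 *		diff |= a[i] ^ b[i];
 *	OPTIMIZER_HIDE_VAR(diff);
 *	return !!diff;
 */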

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
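
/*
 * Expansion sketch (illustrative): with this __LINE__-based fallback,
 * __UNIQUE_ID(foo) on line 42 pastes to __UNIQUE_ID_foo42. That is
 * "not-quite-unique": two expansions with the same prefix on the same
 * line still collide.
 */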

/**
 * data_race - mark an expression as containing intentional data races
 *
 * This data_race() macro is useful for situations in which data races
 * should be forgiven. One example is diagnostic code that accesses
 * shared variables but is not a part of the core synchronization design.
 *
 * This macro *does not* affect normal code generation, but is a hint
 * to tooling that data races here are to be ignored.
 */
#define data_race(expr)							\
({									\
	__unqual_scalar_typeof(({ expr; })) __v = ({			\
		__kcsan_disable_current();				\
		expr;							\
	});								\
	__kcsan_enable_current();					\
	__v;								\
})
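
/*
 * Usage sketch (illustrative; shared_counter is hypothetical): a
 * diagnostic read outside the locking design that KCSAN should not
 * report as a data race:
 *
 *	pr_debug("count: %lu\n", data_race(shared_counter));
 */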

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(.discard.addressable) __used \
		__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;
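
/*
 * Usage sketch (illustrative; my_func is hypothetical): force my_func to
 * be emitted as a real symbol even if every C caller is inlined, so that
 * inline assembler elsewhere can refer to it by name:
 *
 *	__ADDRESSABLE(my_func);
 */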

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
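
/*
 * Sketch (illustrative): offset_to_ptr() undoes a 32-bit self-relative
 * reference. If table[i] holds target - &table[i], then
 *
 *	void *target = offset_to_ptr(&table[i]);
 *
 * recovers the absolute address; such tables need no load-time
 * relocations and stay four bytes per entry on 64-bit kernels.
 */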

#endif /* __ASSEMBLY__ */

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
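
/*
 * Sketch (illustrative): this is what lets ARRAY_SIZE() in
 * <linux/kernel.h> reject pointers at build time:
 *
 *	int a[8], *p = a;
 *
 *	ARRAY_SIZE(a);	(ok: 8)
 *	ARRAY_SIZE(p);	(build error: p is a pointer, not an array)
 */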

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#include <asm/rwonce.h>

#endif /* __LINUX_COMPILER_H */