/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section("_ftrace_annotated_branch")	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section("_ftrace_branch")		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)		__builtin_expect(!!(x), 1)
# define unlikely(x)		__builtin_expect(!!(x), 0)
# define likely_notrace(x)	likely(x)
# define unlikely_notrace(x)	unlikely(x)
#endif
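
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * annotate a branch whose outcome is overwhelmingly one-sided so the
 * compiler lays out the common path first. The handle_rare_error() and
 * do_fast_path() names are hypothetical:
 *
 *	if (unlikely(err))
 *		return handle_rare_error(err);
 *	do_fast_path();
 */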

/* Optimization barrier */
#ifndef barrier
/* The "volatile" is due to gcc bugs */
# define barrier() __asm__ __volatile__("": : :"memory")
#endif
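
/*
 * Illustrative sketch (editor's example, hypothetical 'flag'): the
 * "memory" clobber forces the compiler to re-read memory after the
 * barrier instead of caching 'flag' in a register across the loop:
 *
 *	while (!flag)
 *		barrier();
 */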

#ifndef barrier_data
/*
 * This version exists e.g. to prevent dead store elimination on @ptr,
 * where gcc and llvm may behave differently when otherwise using a
 * normal barrier(): while gcc gets along with a normal barrier(),
 * llvm needs an explicit input variable to be assumed clobbered. The
 * issue is as follows: while the inline asm might access any memory
 * it wants, the compiler could have fit all of @ptr into registers
 * instead, and since @ptr never escaped from there, it could prove
 * that the inline asm wasn't touching any of it. This version works
 * well with both compilers, i.e. we're telling the compiler that the
 * inline asm absolutely may see the contents of @ptr.
 * See also: https://llvm.org/bugs/show_bug.cgi?id=15495
 */
# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
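
/*
 * Illustrative sketch (editor's example, in the spirit of the real
 * memzero_explicit() helper): keep the final memset() of a dying stack
 * buffer from being elided as a dead store:
 *
 *	memset(key, 0, sizeof(key));
 *	barrier_data(key);
 */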

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define __stringify_label(n) #n

#define __annotate_unreachable(c) ({					\
	asm volatile(__stringify_label(c) ":\n\t"			\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long " __stringify_label(c) "b - .\n\t"		\
		     ".popsection\n\t" : : "i" (c));			\
})
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)

#define ASM_REACHABLE							\
	"998:\n\t"							\
	".pushsection .discard.reachable\n\t"				\
	".long 998b - .\n\t"						\
	".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".rodata..c_jump_table")

#else
#define annotate_unreachable()
#define ASM_REACHABLE
#define __annotate_jump_table
#endif /* CONFIG_STACK_VALIDATION */

#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
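
/*
 * Illustrative sketch (editor's example): tell the compiler a path can
 * never be taken, e.g. after an exhaustive switch over a hypothetical
 * enum where every case returns:
 *
 *	switch (state) {
 *	case STATE_A: return handle_a();
 *	case STATE_B: return handle_b();
 *	}
 *	unreachable();
 */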

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in
 * the linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((__section__("___kentry+" #sym)))		\
	= (unsigned long)&sym;
#endif
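
/*
 * Illustrative sketch (editor's example, hypothetical handler): keep a
 * symbol alive that is only reached via an exception vector and never
 * via a reference the compiler can see:
 *
 *	void early_fault_handler(void);
 *	KENTRY(early_fault_handler);
 */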

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#define absolute_pointer(val)	RELOC_HIDE((void *)(val), 0)
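
/*
 * Illustrative sketch (editor's example, made-up address): form a
 * pointer to a fixed legacy address without the compiler treating the
 * arithmetic as an out-of-bounds offset from some known object:
 *
 *	void *rom = absolute_pointer(0xc0000);
 */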

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
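
/*
 * Illustrative sketch (editor's example, in the spirit of
 * crypto_memneq()): break the value dependency each round so the
 * optimizer cannot short-circuit a constant-time comparison:
 *
 *	neq |= a[i] ^ b[i];
 *	OPTIMIZER_HIDE_VAR(neq);
 */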

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
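
/*
 * Illustrative note (editor's example): with this fallback definition,
 * __UNIQUE_ID(foo) on line 42 expands to the identifier
 * __UNIQUE_ID_foo42, so two uses on the same line would still collide,
 * hence "not-quite-unique":
 *
 *	static int __UNIQUE_ID(counter);
 */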
192
Thomas Gleixner37d1a042020-06-11 20:02:46 +0200193/**
194 * data_race - mark an expression as containing intentional data races
195 *
196 * This data_race() macro is useful for situations in which data races
197 * should be forgiven. One example is diagnostic code that accesses
198 * shared variables but is not a part of the core synchronization design.
199 *
200 * This macro *does not* affect normal code generation, but is a hint
201 * to tooling that data races here are to be ignored.
202 */
203#define data_race(expr) \
Andrey Ryabinind9764412015-10-19 11:37:17 +0300204({ \
Marco Elver95c094f2020-05-21 16:20:45 +0200205 __unqual_scalar_typeof(({ expr; })) __v = ({ \
206 __kcsan_disable_current(); \
207 expr; \
Thomas Gleixner37d1a042020-06-11 20:02:46 +0200208 }); \
Marco Elver95c094f2020-05-21 16:20:45 +0200209 __kcsan_enable_current(); \
210 __v; \
Andrey Ryabinind9764412015-10-19 11:37:17 +0300211})
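
/*
 * Illustrative sketch (editor's example, hypothetical stats counter):
 * read a shared counter for a debug printout without KCSAN flagging
 * the unsynchronized access:
 *
 *	pr_debug("drops: %lu\n", data_race(stats->drops));
 */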

/*
 * With CONFIG_CFI_CLANG, the compiler replaces function addresses in
 * instrumented C code with jump table addresses. Architectures that
 * support CFI can define this macro to return the actual function address
 * when needed.
 */
#ifndef function_nocfi
#define function_nocfi(x) (x)
#endif
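
/*
 * Illustrative sketch (editor's example, hypothetical secondary_entry):
 * pass the real machine address of a function to firmware or early
 * boot code that cannot jump through a CFI jump table:
 *
 *	phys_addr_t entry = __pa_symbol(function_nocfi(secondary_entry));
 */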

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym)					\
	static void * __section(".discard.addressable") __used	\
		__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;
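
/*
 * Illustrative sketch (editor's example, hypothetical my_trampoline):
 * keep a function emitted and visible when the only reference to it is
 * made from inline assembly the compiler cannot see through:
 *
 *	__ADDRESSABLE(my_trampoline);
 *	asm(".quad my_trampoline");
 */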

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
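
/*
 * Illustrative note (editor's example, hypothetical table): if a 32-bit
 * slot at address A stores (target - A), offset_to_ptr(&slot) recovers
 * target. This is the decoding step for PC-relative tables built with
 * constructs such as ".long sym - .":
 *
 *	void *target = offset_to_ptr(&table[i]);
 */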

#endif /* __ASSEMBLY__ */

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
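
/*
 * Illustrative note (editor's example): this is how ARRAY_SIZE() in
 * include/linux/kernel.h rejects plain pointers at compile time. For a
 * real array the check expands to 0; for a pointer the build fails:
 *
 *	int buf[8];
 *	size_t n = sizeof(buf) / sizeof((buf)[0]) + __must_be_array(buf);
 */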

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#include <asm/rwonce.h>

#endif /* __LINUX_COMPILER_H */