/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/rmwcc.h>
#include <asm/percpu.h>
#include <linux/thread_info.h>

DECLARE_PER_CPU(int, __preempt_count);
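/*
 * The count lives in a per-CPU variable rather than in thread_info so
 * that the operations below can compile to single percpu-addressed
 * instructions (see the "single instruction" comment further down).
 */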

/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED	0x80000000

/*
 * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
 * that a decrement hitting 0 means we can and should reschedule.
 */
#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
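
/*
 * Example (illustrative, not from the original source):
 *
 *	0x80000000	preemptible, no resched pending (PREEMPT_ENABLED)
 *	0x80000001	preempt_disable()'d once, no resched pending
 *	0x00000001	preempt_disable()'d once, resched pending
 *	0x00000000	preemptible and resched pending: a decrement
 *			hitting this value is the "can and should
 *			reschedule" case.
 */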

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
	return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
}

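/*
 * A cmpxchg loop is used (rather than a plain write) so that the
 * current state of the folded PREEMPT_NEED_RESCHED bit is preserved
 * while the count bits are replaced.
 */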
static __always_inline void preempt_count_set(int pc)
{
	int old, new;

	do {
		old = raw_cpu_read_4(__preempt_count);
		new = (old & PREEMPT_NEED_RESCHED) |
			(pc & ~PREEMPT_NEED_RESCHED);
	} while (raw_cpu_cmpxchg_4(__preempt_count, old, new) != old);
}

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { } while (0)
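/*
 * init_task_preempt_count() is empty here because x86 keeps the count
 * per-CPU rather than per-task, so there is nothing in the task itself
 * to initialise.
 */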

#define init_idle_preempt_count(p, cpu) do { \
	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
} while (0)

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */

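/*
 * Note the inversion below: "setting" need_resched clears the stored
 * bit and "clearing" it sets the stored bit, because the bit is kept
 * inverted so that the decrement-to-zero test works.
 */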
static __always_inline void set_preempt_need_resched(void)
{
	raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
}

static __always_inline void clear_preempt_need_resched(void)
{
	raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
}

static __always_inline bool test_preempt_need_resched(void)
{
	return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
}

/*
 * The various preempt_count add/sub methods
 */
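
/*
 * Example (illustrative): with these, preempt_disable() can compile
 * down to a single "incl %gs:__preempt_count" on x86-64, with no
 * function call and no thread_info access.
 */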

static __always_inline void __preempt_count_add(int val)
{
	raw_cpu_add_4(__preempt_count, val);
}

static __always_inline void __preempt_count_sub(int val)
{
	raw_cpu_add_4(__preempt_count, -val);
}

/*
 * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to
 * reschedule, a decrement which hits zero means we have no preempt_count
 * and should reschedule.
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
	return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
}
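
/*
 * Illustrative (approximate) expansion of the GEN_UNARY_RMWcc() above,
 * assuming the non-asm-goto fallback:
 *
 *	bool c;
 *	asm volatile ("decl " __percpu_arg([var]) CC_SET(e)
 *		      : [var] "+m" (__preempt_count), CC_OUT(e) (c));
 *	return c;
 *
 * i.e. decrement and return whether the result hit zero (ZF set).
 */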

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
}
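
/*
 * Example (illustrative): with preempt_offset == 0 the test reads
 * "__preempt_count == 0", which holds only when no preempt_disable()
 * section is active *and* the inverted PREEMPT_NEED_RESCHED bit has
 * been cleared, i.e. a reschedule is both possible and required.
 */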

#ifdef CONFIG_PREEMPT
extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule() \
	asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT)
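
/*
 * ___preempt_schedule is an assembly thunk that saves and restores the
 * caller-clobbered registers around a call to preempt_schedule(), so
 * the bare "call" above needs no clobber list.  ASM_CALL_CONSTRAINT
 * makes the stack pointer an in/out operand so that a stack frame is
 * set up before the call.
 */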

extern asmlinkage void preempt_schedule(void);
extern asmlinkage void ___preempt_schedule_notrace(void);
# define __preempt_schedule_notrace() \
	asm volatile ("call ___preempt_schedule_notrace" : ASM_CALL_CONSTRAINT)

extern asmlinkage void preempt_schedule_notrace(void);
#endif

#endif /* __ASM_PREEMPT_H */