blob: 18a4b6890fa82f589b9609ce1e509574a5411bf5 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
David Howellsf05e7982012-03-28 18:11:12 +01002#ifndef _ASM_X86_SWITCH_TO_H
3#define _ASM_X86_SWITCH_TO_H
4
Andy Lutomirskid375cf12017-11-02 00:59:16 -07005#include <linux/sched/task_stack.h>
6
David Howellsf05e7982012-03-28 18:11:12 +01007struct task_struct; /* one of the stranger aspects of C forward declarations */
Brian Gerst01003012016-08-13 12:38:19 -04008
9struct task_struct *__switch_to_asm(struct task_struct *prev,
10 struct task_struct *next);
11
Andi Kleen35ea79032013-08-05 15:02:39 -070012__visible struct task_struct *__switch_to(struct task_struct *prev,
Brian Gerst01003012016-08-13 12:38:19 -040013 struct task_struct *next);
David Howellsf05e7982012-03-28 18:11:12 +010014
/* This runs on the previous thread's stack. */
static inline void prepare_switch_to(struct task_struct *next)
{
#ifdef CONFIG_VMAP_STACK
	/*
	 * If the incoming stack has a top-level paging entry that is not
	 * present in the current mm, the resulting #PF would be promoted
	 * to a double-fault and we'd panic.  Touch the new stack now so
	 * that vmalloc_fault can fix up the page tables first if needed.
	 * This can only happen when the stack lives in vmap space.
	 *
	 * The stack is assumed to be aligned so that it never spans more
	 * than one top-level paging entry.
	 *
	 * Follow the stack pointer itself to minimize cache pollution.
	 */
	unsigned char *stack_byte = (unsigned char *)next->thread.sp;

	READ_ONCE(*stack_byte);
#endif
}
35
Brian Gerst616d2482016-08-13 12:38:20 -040036asmlinkage void ret_from_fork(void);
37
/*
 * This is the structure pointed to by thread.sp for an inactive task.  The
 * order of the fields must match the code in __switch_to_asm().
 */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
#else
	unsigned long flags;
	unsigned long si;
	unsigned long di;
#endif
	unsigned long bx;

	/*
	 * These two fields must be together.  They form a stack frame header,
	 * needed by get_frame_pointer().
	 */
	unsigned long bp;
	/* Address __switch_to_asm() will return to when this task resumes. */
	unsigned long ret_addr;
};
62
/*
 * Initial stack layout of a newly forked task: the inactive-task frame
 * consumed by __switch_to_asm(), followed by a full pt_regs register set.
 */
struct fork_frame {
	struct inactive_task_frame frame;
	struct pt_regs regs;
};
David Howellsf05e7982012-03-28 18:11:12 +010067
/*
 * Switch from task @prev to task @next.  The incoming stack is probed
 * first (see prepare_switch_to() above), then the actual context switch
 * happens in assembly; @last is set to the task returned by
 * __switch_to_asm().
 */
#define switch_to(prev, next, last)					\
do {									\
	prepare_switch_to(next);					\
									\
	((last) = __switch_to_asm((prev), (next)));			\
} while (0)
74
Andy Lutomirskibd7dc5a2017-11-02 00:59:09 -070075#ifdef CONFIG_X86_32
76static inline void refresh_sysenter_cs(struct thread_struct *thread)
77{
78 /* Only happens when SEP is enabled, no need to test "SEP"arately: */
Andy Lutomirskic482fee2017-12-04 15:07:29 +010079 if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
Andy Lutomirskibd7dc5a2017-11-02 00:59:09 -070080 return;
81
Andy Lutomirskic482fee2017-12-04 15:07:29 +010082 this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
Andy Lutomirskibd7dc5a2017-11-02 00:59:09 -070083 wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
84}
85#endif
86
/* This is used when switching tasks or entering/exiting vm86 mode. */
static inline void update_task_stack(struct task_struct *task)
{
	/* sp0 always points to the entry trampoline stack, which is constant: */
#ifdef CONFIG_X86_32
	if (static_cpu_has(X86_FEATURE_XENPV))
		load_sp0(task->thread.sp0);
	else
		this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
#else
	/*
	 * x86-64 updates x86_tss.sp1 via cpu_current_top_of_stack.  That
	 * doesn't work on x86-32 because sp1 and cpu_current_top_of_stack
	 * have different values (because of the non-zero stack-padding on
	 * 32bit).  So on x86-64 only Xen PV needs an explicit update here.
	 */
	if (static_cpu_has(X86_FEATURE_XENPV))
		load_sp0(task_top_of_stack(task));
#endif

}
108
David Howellsf05e7982012-03-28 18:11:12 +0100109#endif /* _ASM_X86_SWITCH_TO_H */