#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

#include <linux/sched/task_stack.h>

struct task_struct; /* one of the stranger aspects of C forward declarations */

struct task_struct *__switch_to_asm(struct task_struct *prev,
				    struct task_struct *next);

__visible struct task_struct *__switch_to(struct task_struct *prev,
					  struct task_struct *next);
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss);

/* This runs on the previous thread's stack. */
static inline void prepare_switch_to(struct task_struct *prev,
				     struct task_struct *next)
{
#ifdef CONFIG_VMAP_STACK
	/*
	 * If we switch to a stack that has a top-level paging entry
	 * that is not present in the current mm, the resulting #PF
	 * will be promoted to a double-fault and we'll panic.  Probe
	 * the new stack now so that vmalloc_fault can fix up the page
	 * tables if needed.  This can only happen if we use a stack
	 * in vmap space.
	 *
	 * We assume that the stack is aligned so that it never spans
	 * more than one top-level paging entry.
	 *
	 * To minimize cache pollution, just follow the stack pointer.
	 */
	READ_ONCE(*(unsigned char *)next->thread.sp);
#endif
}
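
/*
 * Illustrative sketch (not part of this header): the probe above is just a
 * faulting read, performed while we are still on prev's stack, e.g.:
 *
 *	volatile unsigned char c = *(unsigned char *)next->thread.sp;
 *
 * A #PF taken here is survivable and lets vmalloc_fault() copy the missing
 * top-level entry into the current mm's page tables.  Once %rsp itself
 * points into the unmapped vmap area, the CPU cannot push an exception
 * frame for the #PF, so it escalates to a double fault instead.
 */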
38
Brian Gerst616d2482016-08-13 12:38:20 -040039asmlinkage void ret_from_fork(void);
40
Josh Poimboeuf2c96b2f2017-01-09 12:00:24 -060041/*
42 * This is the structure pointed to by thread.sp for an inactive task. The
43 * order of the fields must match the code in __switch_to_asm().
44 */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
#else
	unsigned long si;
	unsigned long di;
#endif
	unsigned long bx;

	/*
	 * These two fields must be together.  They form a stack frame header,
	 * needed by get_frame_pointer().
	 */
	unsigned long bp;
	unsigned long ret_addr;
};
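
/*
 * Layout sketch (illustrative, assuming CONFIG_X86_64 with frame pointers):
 * thread.sp points at the last register __switch_to_asm() pushed, and the
 * bp/ret_addr pair at the high end looks like an ordinary call frame to
 * the frame-pointer unwinder:
 *
 *	thread.sp -> r15
 *	             r14
 *	             r13
 *	             r12
 *	             bx
 *	             bp        <- stack frame header consumed by
 *	             ret_addr  <-   get_frame_pointer()
 */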

struct fork_frame {
	struct inactive_task_frame frame;
	struct pt_regs regs;
};
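
/*
 * A minimal sketch of how a fork frame is assembled, modeled on the x86-64
 * copy_thread() path (details vary by kernel version): the child's pt_regs
 * live at the top of its stack, the inactive_task_frame sits directly below
 * them, and ret_addr is aimed at ret_from_fork so the child's first
 * switch_to() "returns" there:
 *
 *	childregs = task_pt_regs(p);
 *	fork_frame = container_of(childregs, struct fork_frame, regs);
 *	frame = &fork_frame->frame;
 *	frame->bp = 0;
 *	frame->ret_addr = (unsigned long) ret_from_fork;
 *	p->thread.sp = (unsigned long) fork_frame;
 */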

#define switch_to(prev, next, last)					\
do {									\
	prepare_switch_to(prev, next);					\
									\
	((last) = __switch_to_asm((prev), (next)));			\
} while (0)
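
/*
 * Usage sketch (simplified from the scheduler, not the exact source):
 * context_switch() in kernel/sched/core.c ends roughly with:
 *
 *	switch_to(prev, next, prev);
 *	barrier();
 *	return finish_task_switch(prev);
 *
 * "last" matters because when this task is eventually switched back in,
 * its stack-local prev is stale; __switch_to_asm() returns the task we
 * really came from so finish_task_switch() can clean up after it.
 */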

#ifdef CONFIG_X86_32
static inline void refresh_sysenter_cs(struct thread_struct *thread)
{
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs))
		return;

	this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs);
	wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
}
#endif
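
/*
 * Usage note (based on the 32-bit entry code; details vary by version):
 * per-thread sysenter_cs changes mainly around vm86 mode, which disables
 * SYSENTER by zeroing it.  The early-return check above lets task switches
 * skip the relatively expensive WRMSR whenever the MSR already holds the
 * incoming thread's value.
 */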

/* This is used when switching tasks or entering/exiting vm86 mode. */
static inline void update_sp0(struct task_struct *task)
{
#ifdef CONFIG_X86_32
	load_sp0(task->thread.sp0);
#else
	load_sp0(task_top_of_stack(task));
#endif
}
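
/*
 * Usage sketch (hedged; based on the x86 context-switch code):
 * __switch_to() calls update_sp0(next_p) so the TSS's ring-0 stack pointer
 * follows the incoming task.  64-bit derives it from task_top_of_stack()
 * because sp0 is always the top of the task's stack there; 32-bit keeps a
 * per-thread sp0 since vm86 mode adjusts it.
 */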

#endif /* _ASM_X86_SWITCH_TO_H */