/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2002 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 */
#ifndef _ASM_FPU_H
#define _ASM_FPU_H

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>
#include <linux/bitops.h>

#include <asm/mipsregs.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/fpu_emulator.h>
#include <asm/hazards.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/current.h>
#include <asm/msa.h>

#ifdef CONFIG_MIPS_MT_FPAFF
#include <asm/mips_mt.h>
#endif

/*
 * This enum specifies a mode in which we want the FPU to operate, for cores
 * which implement the Status.FR bit. Note that the bottom bit of the value
 * purposefully matches the desired value of the Status.FR bit.
 */
enum fpu_mode {
        FPU_32BIT = 0,          /* FR = 0 */
        FPU_64BIT,              /* FR = 1, FRE = 0 */
        FPU_AS_IS,
        FPU_HYBRID,             /* FR = 1, FRE = 1 */

#define FPU_FR_MASK             0x1
};

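/*
 * A quick illustration of the encoding above (added note, derived from the
 * values themselves): FPU_32BIT & FPU_FR_MASK == 0, while FPU_64BIT and
 * FPU_HYBRID masked with FPU_FR_MASK are both 1, so masking a mode with
 * FPU_FR_MASK yields the Status.FR value to program. FPU_AS_IS never reaches
 * that calculation; __enable_fpu() handles it before the fr_common path.
 */
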
#ifdef CONFIG_MIPS_FP_SUPPORT

extern void _save_fp(struct task_struct *);
extern void _restore_fp(struct task_struct *);

#define __disable_fpu()                                         \
do {                                                            \
        clear_c0_status(ST0_CU1);                               \
        disable_fpu_hazard();                                   \
} while (0)

static inline int __enable_fpu(enum fpu_mode mode)
{
        int fr;

        switch (mode) {
        case FPU_AS_IS:
                /* just enable the FPU in its current mode */
                set_c0_status(ST0_CU1);
                enable_fpu_hazard();
                return 0;

        case FPU_HYBRID:
                if (!cpu_has_fre)
                        return SIGFPE;

                /* set FRE */
                set_c0_config5(MIPS_CONF5_FRE);
                goto fr_common;

        case FPU_64BIT:
#if !(defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) \
      || defined(CONFIG_64BIT))
                /* we only have a 32-bit FPU */
                return SIGFPE;
#endif
                /* fall through */
        case FPU_32BIT:
                if (cpu_has_fre) {
                        /* clear FRE */
                        clear_c0_config5(MIPS_CONF5_FRE);
                }
fr_common:
                /* set CU1 & change FR appropriately */
                fr = (int)mode & FPU_FR_MASK;
                change_c0_status(ST0_CU1 | ST0_FR, ST0_CU1 | (fr ? ST0_FR : 0));
                enable_fpu_hazard();

                /* check FR has the desired value */
                if (!!(read_c0_status() & ST0_FR) == !!fr)
                        return 0;

                /* unsupported FR value */
                __disable_fpu();
                return SIGFPE;

        default:
                BUG();
        }

        return SIGFPE;
}

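/*
 * Usage note (illustrative, not taken from a specific caller): __enable_fpu()
 * returns 0 on success or a signal number when the requested mode cannot be
 * provided (SIGFPE here, SIGILL from the !CONFIG_MIPS_FP_SUPPORT stubs below).
 * A non-zero return is meant to be delivered to the task as a signal rather
 * than treated as an errno value, roughly:
 *
 *      err = __enable_fpu(FPU_AS_IS);
 *      if (err)
 *              deliver 'err' to the faulting task as a signal
 */
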
#define clear_fpu_owner()       clear_thread_flag(TIF_USEDFPU)

static inline int __is_fpu_owner(void)
{
        return test_thread_flag(TIF_USEDFPU);
}

static inline int is_fpu_owner(void)
{
        return cpu_has_fpu && __is_fpu_owner();
}

static inline int __own_fpu(void)
{
        enum fpu_mode mode;
        int ret;

        if (test_thread_flag(TIF_HYBRID_FPREGS))
                mode = FPU_HYBRID;
        else
                mode = !test_thread_flag(TIF_32BIT_FPREGS);

        ret = __enable_fpu(mode);
        if (ret)
                return ret;

        KSTK_STATUS(current) |= ST0_CU1;
        if (mode == FPU_64BIT || mode == FPU_HYBRID)
                KSTK_STATUS(current) |= ST0_FR;
        else /* mode == FPU_32BIT */
                KSTK_STATUS(current) &= ~ST0_FR;

        set_thread_flag(TIF_USEDFPU);
        return 0;
}

static inline int own_fpu_inatomic(int restore)
{
        int ret = 0;

        if (cpu_has_fpu && !__is_fpu_owner()) {
                ret = __own_fpu();
                if (restore && !ret)
                        _restore_fp(current);
        }
        return ret;
}

static inline int own_fpu(int restore)
{
        int ret;

        preempt_disable();
        ret = own_fpu_inatomic(restore);
        preempt_enable();
        return ret;
}

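/*
 * Typical use (sketch, not lifted from a particular caller): the coprocessor
 * unusable exception path lets the faulting task own the FPU again and
 * reloads its saved register file:
 *
 *      sig = own_fpu(1);       enable CU1 and _restore_fp(current)
 *      if (sig)
 *              the returned signal (SIGFPE) is delivered to the task
 *
 * Passing restore == 0 acquires the FPU without reloading the saved context,
 * for callers that are about to overwrite the registers anyway.
 */
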
static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
{
        if (is_msa_enabled()) {
                if (save) {
                        save_msa(tsk);
                        tsk->thread.fpu.fcr31 =
                                read_32bit_cp1_register(CP1_STATUS);
                }
                disable_msa();
                clear_tsk_thread_flag(tsk, TIF_USEDMSA);
                __disable_fpu();
        } else if (is_fpu_owner()) {
                if (save)
                        _save_fp(tsk);
                __disable_fpu();
        } else {
                /* FPU should not have been left enabled with no owner */
                WARN(read_c0_status() & ST0_CU1,
                     "Orphaned FPU left enabled");
        }
        KSTK_STATUS(tsk) &= ~ST0_CU1;
        clear_tsk_thread_flag(tsk, TIF_USEDFPU);
}

static inline void lose_fpu(int save)
{
        preempt_disable();
        lose_fpu_inatomic(save, current);
        preempt_enable();
}

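/*
 * Summary (added note, derived from the code above): lose_fpu(1) flushes any
 * live FPU or MSA register state into tsk->thread.fpu and then releases the
 * hardware, so the saved context can safely be inspected or modified;
 * lose_fpu(0) simply discards whatever is live. Either way ownership is
 * cleared, so the task's next FP use will trap and re-acquire the unit via
 * own_fpu().
 */
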
/**
 * init_fp_ctx() - Initialize task FP context
 * @target: The task whose FP context should be initialized.
 *
 * Initializes the FP context of the target task to sane default values if that
 * target task does not already have valid FP context. Once the context has
 * been initialized, the task will be marked as having used FP & thus having
 * valid FP context.
 *
 * Returns: true if context is initialized, else false.
 */
static inline bool init_fp_ctx(struct task_struct *target)
{
        /* If FP has been used then the target already has context */
        if (tsk_used_math(target))
                return false;

        /* Begin with data registers set to all 1s... */
        memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));

        /* FCSR has been preset by `mips_set_personality_nan'. */

        /*
         * Record that the target has "used" math, such that the context
         * just initialised, and any modifications made by the caller,
         * aren't discarded.
         */
        set_stopped_child_used_math(target);

        return true;
}

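/*
 * Usage note (illustrative assumption, not from the original file): code that
 * writes FP state for another task, such as the ptrace/regset handlers, is
 * expected to call init_fp_ctx(target) first so that a task which has never
 * used the FPU gets a well defined starting context before selected registers
 * are overwritten with user-supplied values.
 */
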
static inline void save_fp(struct task_struct *tsk)
{
        if (cpu_has_fpu)
                _save_fp(tsk);
}

static inline void restore_fp(struct task_struct *tsk)
{
        if (cpu_has_fpu)
                _restore_fp(tsk);
}

static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
{
        if (tsk == current) {
                preempt_disable();
                if (is_fpu_owner())
                        _save_fp(current);
                preempt_enable();
        }

        return tsk->thread.fpu.fpr;
}

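/*
 * Note (added, based on the implementation above): get_fpu_regs() flushes the
 * live register file to memory when tsk is the current task, so the returned
 * pointer reflects up-to-date values. For any other task the caller is
 * responsible for ensuring it is not running, e.g. a stopped ptrace target.
 */
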
#else /* !CONFIG_MIPS_FP_SUPPORT */

/*
 * When FP support is disabled we provide only a minimal set of stub functions
 * to avoid callers needing to care too much about CONFIG_MIPS_FP_SUPPORT.
 */

static inline int __enable_fpu(enum fpu_mode mode)
{
        return SIGILL;
}

static inline void __disable_fpu(void)
{
        /* no-op */
}

static inline int is_fpu_owner(void)
{
        return 0;
}

static inline void clear_fpu_owner(void)
{
        /* no-op */
}

static inline int own_fpu_inatomic(int restore)
{
        return SIGILL;
}

static inline int own_fpu(int restore)
{
        return SIGILL;
}

static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
{
        /* no-op */
}

static inline void lose_fpu(int save)
{
        /* no-op */
}

static inline bool init_fp_ctx(struct task_struct *target)
{
        return false;
}

/*
 * The following functions should only be called in paths where we know that FP
 * support is enabled, typically a path where own_fpu() or __enable_fpu() have
 * returned successfully. When CONFIG_MIPS_FP_SUPPORT=n it is known at compile
 * time that this should never happen, so calls to these functions should be
 * optimized away & never actually be emitted.
 */

extern void save_fp(struct task_struct *tsk)
        __compiletime_error("save_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");

extern void _save_fp(struct task_struct *)
        __compiletime_error("_save_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");

extern void restore_fp(struct task_struct *tsk)
        __compiletime_error("restore_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");

extern void _restore_fp(struct task_struct *)
        __compiletime_error("_restore_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");

extern union fpureg *get_fpu_regs(struct task_struct *tsk)
        __compiletime_error("get_fpu_regs() should not be called when CONFIG_MIPS_FP_SUPPORT=n");

#endif /* !CONFIG_MIPS_FP_SUPPORT */
#endif /* _ASM_FPU_H */