fpu-internal.h revision 5d2bd7009f306c82afddd1ca4d9763ad8473c216
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H

#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>

#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			compat_sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct k_sigaction *ka,
		     compat_sigset_t *set, struct pt_regs *regs);
#else
# define user_i387_ia32_struct	user_i387_struct
# define user32_fxsr_struct	user_fxsr_struct
# define ia32_setup_frame	__setup_frame
# define ia32_setup_rt_frame	__setup_rt_frame
#endif

extern unsigned int mxcsr_feature_mask;
extern void fpu_init(void);
extern void eager_fpu_init(void);

DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);

extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
			      struct task_struct *tsk);
extern void convert_to_fxsr(struct task_struct *tsk,
			    const struct user_i387_ia32_struct *env);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				 xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

#ifdef CONFIG_MATH_EMULATION
# define HAVE_HWFP		(boot_cpu_data.hard_math)
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
# define HAVE_HWFP		1
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

static inline int is_ia32_compat_frame(void)
{
	return config_enabled(CONFIG_IA32_EMULATION) &&
	       test_thread_flag(TIF_IA32);
}

static inline int is_ia32_frame(void)
{
	return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
}

static inline int is_x32_frame(void)
{
	return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
}

#define X87_FSW_ES		(1 << 7)	/* Exception Summary */

static __always_inline __pure bool use_eager_fpu(void)
{
	return static_cpu_has(X86_FEATURE_EAGER_FPU);
}

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

static inline void fx_finit(struct i387_fxsave_struct *fx)
{
	memset(fx, 0, xstate_size);
	fx->cwd = 0x37f;
	if (cpu_has_xmm)
		fx->mxcsr = MXCSR_DEFAULT;
}

extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}

#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})
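
/*
 * A minimal usage sketch of check_insn(): the macro evaluates to 0 when
 * the instruction completes, and to -1 when it faults, because the .fixup
 * code reached through the exception table stores -1 into the [err]
 * operand.  A caller (shown here with a hypothetical user pointer 'fx')
 * simply tests the result:
 *
 *	if (check_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx)))
 *		return -EFAULT;
 */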

static inline int fsave_user(struct i387_fsave_struct __user *fx)
{
	return check_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return check_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return check_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return check_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int frstor_checking(struct i387_fsave_struct *fx)
{
	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	if (config_enabled(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
		 * REX prefix will be generated (to the assembler, rex64
		 * followed by semicolon is a separate instruction), and hence
		 * the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix submitted
		 * to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
		 *
		 * This, however, we can work around by forcing the compiler to
		 * select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile( "rex64/fxsave (%[fx])"
			     : "=m" (fpu->state->fxsave)
			     : [fx] "R" (&fpu->state->fxsave));
	}
}
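
/*
 * A concrete illustration of the constraint trick above (assumed register
 * allocation, for explanation only): the "R" constraint restricts the
 * address to a legacy register, so the assembler sees something like
 *
 *	rex64/fxsave (%rax)
 *
 * which needs only the one explicit REX prefix.  Had the compiler been
 * free to pick an extended register such as %r10, the addressing itself
 * would require a second REX prefix and run into the binutils limitation
 * described in the comment.
 */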

/*
 * These must be called with preempt disabled. They return
 * 'true' if the FPU state is still intact.
 */
static inline int fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * xsave header may indicate the init state of the FP.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return 1;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return 0;
	}

	/*
	 * If exceptions are pending, we need to clear them so
	 * that we don't randomly get exceptions later.
	 *
	 * FIXME! Is this perhaps only true for the old-style
	 * irq13 case? Maybe we could leave the x87 state
	 * intact otherwise?
	 */
	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
		asm volatile("fnclex");
		return 0;
	}
	return 1;
}

static inline int __save_init_fpu(struct task_struct *tsk)
{
	return fpu_save_init(&tsk->thread.fpu);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(&fpu->state->xsave);
	else if (use_fxsr())
		return fxrstor_checking(&fpu->state->fxsave);
	else
		return frstor_checking(&fpu->state->fsave);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
	   is pending. Clear the x87 state here by setting it to fixed
	   values. "m" is a random variable that should be in L1 */
	alternative_input(
		ASM_NOP8 ASM_NOP2,
		"emms\n\t"	  /* clear stack tags */
		"fildl %P[addr]", /* set F?P to defined value */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (tsk->thread.fpu.has_fpu));

	return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Software FPU state helpers. Careful: these need
 * preemption protection *and* they need to be
 * properly paired with the CR0.TS changes!
 */
static inline int __thread_has_fpu(struct task_struct *tsk)
{
	return tsk->thread.fpu.has_fpu;
}

/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 0;
	this_cpu_write(fpu_owner_task, NULL);
}

/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 1;
	this_cpu_write(fpu_owner_task, tsk);
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * so try to avoid using them on their own.
 */
static inline void __thread_fpu_end(struct task_struct *tsk)
{
	__thread_clear_has_fpu(tsk);
	if (!use_eager_fpu())
		stts();
}

static inline void __thread_fpu_begin(struct task_struct *tsk)
{
	if (!use_eager_fpu())
		clts();
	__thread_set_has_fpu(tsk);
}
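
/*
 * A minimal sketch (hypothetical caller, not part of this header) of how
 * the __thread_fpu_begin()/__thread_fpu_end() pair is meant to be used:
 * bracket them with preemption disabled so the owner bookkeeping and the
 * CR0.TS update cannot be interleaved with a context switch.
 *
 *	preempt_disable();
 *	__thread_fpu_begin(tsk);
 *	... touch the FPU / restore register state ...
 *	__thread_fpu_end(tsk);
 *	preempt_enable();
 */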

static inline void __drop_fpu(struct task_struct *tsk)
{
	if (__thread_has_fpu(tsk)) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		__thread_fpu_end(tsk);
	}
}

static inline void drop_fpu(struct task_struct *tsk)
{
	/*
	 * Forget coprocessor state..
	 */
	preempt_disable();
	tsk->fpu_counter = 0;
	__drop_fpu(tsk);
	clear_used_math();
	preempt_enable();
}

static inline void drop_init_fpu(struct task_struct *tsk)
{
	if (!use_eager_fpu())
		drop_fpu(tsk);
	else {
		if (use_xsave())
			xrstor_state(init_xstate_buf, -1);
		else
			fxrstor_checking(&init_xstate_buf->i387);
	}
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

/*
 * FIXME! We could do a totally lazy restore, but we need to
 * add a per-cpu "this was the task that last touched the FPU
 * on this CPU" variable, and the task needs to have an "I last
 * touched the FPU on this CPU" flag, and check them.
 *
 * We don't do that yet, so "fpu_lazy_restore()" always returns
 * false, but some day..
 */
static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	return new == this_cpu_read_stable(fpu_owner_task) &&
		cpu == new->thread.fpu.last_cpu;
}

static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * If the task has used math, pre-load the FPU on eager (xsave)
	 * processors, or if the past 5 consecutive context switches used math.
	 */
	fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
					     new->fpu_counter > 5);
	if (__thread_has_fpu(old)) {
		if (!__save_init_fpu(old))
			cpu = ~0;
		old->thread.fpu.last_cpu = cpu;
		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new->fpu_counter++;
			__thread_set_has_fpu(new);
			prefetch(new->thread.fpu.state);
		} else if (!use_eager_fpu())
			stts();
	} else {
		old->fpu_counter = 0;
		old->thread.fpu.last_cpu = ~0;
		if (fpu.preload) {
			new->fpu_counter++;
			if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
			__thread_fpu_begin(new);
		}
	}
	return fpu;
}

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
	if (fpu.preload) {
		if (unlikely(restore_fpu_checking(new)))
			drop_init_fpu(new);
	}
}
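
/*
 * A rough sketch of how the context-switch path is expected to pair the
 * two stages (simplified; variable names are illustrative):
 *
 *	fpu_switch_t fpu;
 *
 *	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 *	... switch stacks, TLS, segment registers, etc. ...
 *	switch_fpu_finish(next_p, fpu);
 *
 * switch_fpu_prepare() runs while 'prev_p' is still current, so it can
 * save the old state; switch_fpu_finish() then restores the new task's
 * registers only if 'fpu.preload' was set.
 */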

/*
 * Signal frame handlers...
 */
extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);

static inline int xstate_sigframe_size(void)
{
	return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
}

static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
{
	void __user *buf_fx = buf;
	int size = xstate_sigframe_size();

	if (ia32_frame && use_fxsr()) {
		buf_fx = buf + sizeof(struct i387_fsave_struct);
		size += sizeof(struct i387_fsave_struct);
	}

	return __restore_xstate_sig(buf, buf_fx, size);
}

/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the FPU state. This function does not do any save/restore on its own.
 */
static inline void user_fpu_begin(void)
{
	preempt_disable();
	if (!user_has_fpu())
		__thread_fpu_begin(current);
	preempt_enable();
}

static inline void __save_fpu(struct task_struct *tsk)
{
	if (use_xsave())
		xsave_state(&tsk->thread.fpu.state->xsave, -1);
	else
		fpu_fxsave(&tsk->thread.fpu);
}

/*
 * These disable preemption on their own and are safe.
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	WARN_ON_ONCE(!__thread_has_fpu(tsk));

	if (use_eager_fpu()) {
		__save_fpu(tsk);
		return;
	}

	preempt_disable();
	__save_init_fpu(tsk);
	__thread_fpu_end(tsk);
	preempt_enable();
}

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

static bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}

static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
{
	if (use_eager_fpu()) {
		memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
		__save_fpu(dst);
	} else {
		struct fpu *dfpu = &dst->thread.fpu;
		struct fpu *sfpu = &src->thread.fpu;

		unlazy_fpu(src);
		memcpy(dfpu->state, sfpu->state, xstate_size);
	}
}

static inline unsigned long
alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
		unsigned long *size)
{
	unsigned long frame_size = xstate_sigframe_size();

	*buf_fx = sp = round_down(sp - frame_size, 64);
	if (ia32_frame && use_fxsr()) {
		frame_size += sizeof(struct i387_fsave_struct);
		sp -= sizeof(struct i387_fsave_struct);
	}

	*size = frame_size;
	return sp;
}

#endif