fpu-internal.h revision c375f15a434db1867cb004bafba92aba739e4e39
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H

#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>
#include <asm/smap.h>

#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
struct ksignal;
int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
			compat_sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct ksignal *ksig,
		     compat_sigset_t *set, struct pt_regs *regs);
#else
# define user_i387_ia32_struct	user_i387_struct
# define user32_fxsr_struct	user_fxsr_struct
# define ia32_setup_frame	__setup_frame
# define ia32_setup_rt_frame	__setup_rt_frame
#endif

extern unsigned int mxcsr_feature_mask;
extern void fpu_init(void);
extern void eager_fpu_init(void);

DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);

extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
			      struct task_struct *tsk);
extern void convert_to_fxsr(struct task_struct *tsk,
			    const struct user_i387_ia32_struct *env);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
			  xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
			  xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

static inline int is_ia32_compat_frame(void)
{
	return config_enabled(CONFIG_IA32_EMULATION) &&
	       test_thread_flag(TIF_IA32);
}

static inline int is_ia32_frame(void)
{
	return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
}

static inline int is_x32_frame(void)
{
	return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
}

#define X87_FSW_ES (1 << 7)	/* Exception Summary */

static __always_inline __pure bool use_eager_fpu(void)
{
	return static_cpu_has(X86_FEATURE_EAGER_FPU);
}

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

static inline void fx_finit(struct i387_fxsave_struct *fx)
{
	memset(fx, 0, xstate_size);
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}

extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}

#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})
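
/*
 * For example, a wrapper such as
 *
 *	check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx))
 *
 * expands, roughly, to an asm statement whose instruction at label 1:
 * gets an exception table entry pointing at the fixup code at 3:.  If
 * the fxrstor faults, the fixup stores -1 in err and resumes at 2:, so
 * the caller sees -1 instead of an unhandled fault; on success, err
 * keeps the zero it was seeded with through the "0"(0) input operand.
 */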

static inline int fsave_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int frstor_checking(struct i387_fsave_struct *fx)
{
	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int frstor_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
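
/*
 * Usage sketch (illustrative): dumping the FXSR image into user memory,
 * e.g. while building a signal frame.  Here 'buf' stands in for a
 * __user pointer owned by the caller; a nonzero return means the store
 * to user memory faulted and the frame must be treated as unwritten:
 *
 *	if (fxsave_user(buf))
 *		return -EFAULT;
 */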

static inline void fpu_fxsave(struct fpu *fpu)
{
	if (config_enabled(CONFIG_X86_32))
		asm volatile("fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
		 * REX prefix will be generated (to the assembler, rex64
		 * followed by semicolon is a separate instruction), and hence
		 * the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix submitted
		 * to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
		 *
		 * This, however, we can work around by forcing the compiler to
		 * select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile("rex64/fxsave (%[fx])"
			     : "=m" (fpu->state->fxsave)
			     : [fx] "R" (&fpu->state->fxsave));
	}
}

/*
 * These must be called with preemption disabled and return
 * 'true' if the FPU state is still intact.
 */
static inline int fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * The xsave header may indicate the init state of the FP.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return 1;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return 0;
	}

	/*
	 * If exceptions are pending, we need to clear them so
	 * that we don't randomly get exceptions later.
	 *
	 * FIXME! Is this perhaps only true for the old-style
	 * irq13 case? Maybe we could leave the x87 state
	 * intact otherwise?
	 */
	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
		asm volatile("fnclex");
		return 0;
	}
	return 1;
}

static inline int __save_init_fpu(struct task_struct *tsk)
{
	return fpu_save_init(&tsk->thread.fpu);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(&fpu->state->xsave);
	else if (use_fxsr())
		return fxrstor_checking(&fpu->state->fxsave);
	else
		return frstor_checking(&fpu->state->fsave);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an
	 * exception is pending.  Clear the x87 state here by setting
	 * it to fixed values.  "m" is a random variable that should
	 * be in L1 cache.
	 */
	alternative_input(
		ASM_NOP8 ASM_NOP2,
		"emms\n\t"		/* clear stack tags */
		"fildl %P[addr]",	/* set F?P to defined value */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (tsk->thread.fpu.has_fpu));

	return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Software FPU state helpers. Careful: these need to
 * be preemption protected *and* they need to be
 * properly paired with the CR0.TS changes!
 */
static inline int __thread_has_fpu(struct task_struct *tsk)
{
	return tsk->thread.fpu.has_fpu;
}

/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 0;
	this_cpu_write(fpu_owner_task, NULL);
}

/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 1;
	this_cpu_write(fpu_owner_task, tsk);
}
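
/*
 * Pairing sketch (illustrative): the flag updates above only make
 * sense bracketed by the matching CR0.TS transitions, with preemption
 * already disabled by the caller:
 *
 *	clts();				(FPU usable: no #NM trap)
 *	__thread_set_has_fpu(tsk);
 *	...
 *	__thread_clear_has_fpu(tsk);
 *	stts();				(next FPU use traps again)
 */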

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * so do try to avoid using them on their own.
 */
static inline void __thread_fpu_end(struct task_struct *tsk)
{
	__thread_clear_has_fpu(tsk);
	if (!use_eager_fpu())
		stts();
}

static inline void __thread_fpu_begin(struct task_struct *tsk)
{
	if (!static_cpu_has_safe(X86_FEATURE_EAGER_FPU))
		clts();
	__thread_set_has_fpu(tsk);
}

static inline void __drop_fpu(struct task_struct *tsk)
{
	if (__thread_has_fpu(tsk)) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		__thread_fpu_end(tsk);
	}
}

static inline void drop_fpu(struct task_struct *tsk)
{
	/*
	 * Forget coprocessor state.
	 */
	preempt_disable();
	tsk->thread.fpu_counter = 0;
	__drop_fpu(tsk);
	clear_used_math();
	preempt_enable();
}

static inline void drop_init_fpu(struct task_struct *tsk)
{
	if (!use_eager_fpu())
		drop_fpu(tsk);
	else {
		if (use_xsave())
			xrstor_state(init_xstate_buf, -1);
		else
			fxrstor_checking(&init_xstate_buf->i387);
	}
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

/*
 * Must be run with preemption disabled: this clears the fpu_owner_task
 * on this CPU.
 *
 * This will disable any lazy FPU state restore of the current FPU state,
 * but if the current thread owns the FPU, it will still be saved.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_owner_task, cpu) = NULL;
}

static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	return new == this_cpu_read_stable(fpu_owner_task) &&
		cpu == new->thread.fpu.last_cpu;
}

static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * Pre-load the FPU state if the task has used math and we are
	 * either in eager-FPU mode or the task used math during its
	 * last five context switches.
	 */
	fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
					     new->thread.fpu_counter > 5);
	if (__thread_has_fpu(old)) {
		if (!__save_init_fpu(old))
			cpu = ~0;
		old->thread.fpu.last_cpu = cpu;
		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new->thread.fpu_counter++;
			__thread_set_has_fpu(new);
			prefetch(new->thread.fpu.state);
		} else if (!use_eager_fpu())
			stts();
	} else {
		old->thread.fpu_counter = 0;
		old->thread.fpu.last_cpu = ~0;
		if (fpu.preload) {
			new->thread.fpu_counter++;
			if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
			__thread_fpu_begin(new);
		}
	}
	return fpu;
}

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
	if (fpu.preload) {
		if (unlikely(restore_fpu_checking(new)))
			drop_init_fpu(new);
	}
}
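
/*
 * Pairing sketch (illustrative): a context-switch path is expected to
 * call the two halves around the actual switch, prepare() while still
 * running on behalf of the old task, finish() once the new task is
 * current ('prev'/'next' are the scheduler's usual names):
 *
 *	fpu_switch_t fpu;
 *
 *	fpu = switch_fpu_prepare(prev, next, cpu);
 *	... switch stacks / thread state ...
 *	switch_fpu_finish(next, fpu);
 */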
461 */ 462static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu) 463{ 464 if (fpu.preload) { 465 if (unlikely(restore_fpu_checking(new))) 466 drop_init_fpu(new); 467 } 468} 469 470/* 471 * Signal frame handlers... 472 */ 473extern int save_xstate_sig(void __user *buf, void __user *fx, int size); 474extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size); 475 476static inline int xstate_sigframe_size(void) 477{ 478 return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size; 479} 480 481static inline int restore_xstate_sig(void __user *buf, int ia32_frame) 482{ 483 void __user *buf_fx = buf; 484 int size = xstate_sigframe_size(); 485 486 if (ia32_frame && use_fxsr()) { 487 buf_fx = buf + sizeof(struct i387_fsave_struct); 488 size += sizeof(struct i387_fsave_struct); 489 } 490 491 return __restore_xstate_sig(buf, buf_fx, size); 492} 493 494/* 495 * Need to be preemption-safe. 496 * 497 * NOTE! user_fpu_begin() must be used only immediately before restoring 498 * it. This function does not do any save/restore on their own. 499 */ 500static inline void user_fpu_begin(void) 501{ 502 preempt_disable(); 503 if (!user_has_fpu()) 504 __thread_fpu_begin(current); 505 preempt_enable(); 506} 507 508static inline void __save_fpu(struct task_struct *tsk) 509{ 510 if (use_xsave()) 511 xsave_state(&tsk->thread.fpu.state->xsave, -1); 512 else 513 fpu_fxsave(&tsk->thread.fpu); 514} 515 516/* 517 * These disable preemption on their own and are safe 518 */ 519static inline void save_init_fpu(struct task_struct *tsk) 520{ 521 WARN_ON_ONCE(!__thread_has_fpu(tsk)); 522 523 if (use_eager_fpu()) { 524 __save_fpu(tsk); 525 return; 526 } 527 528 preempt_disable(); 529 __save_init_fpu(tsk); 530 __thread_fpu_end(tsk); 531 preempt_enable(); 532} 533 534/* 535 * i387 state interaction 536 */ 537static inline unsigned short get_fpu_cwd(struct task_struct *tsk) 538{ 539 if (cpu_has_fxsr) { 540 return tsk->thread.fpu.state->fxsave.cwd; 541 } else { 542 return (unsigned short)tsk->thread.fpu.state->fsave.cwd; 543 } 544} 545 546static inline unsigned short get_fpu_swd(struct task_struct *tsk) 547{ 548 if (cpu_has_fxsr) { 549 return tsk->thread.fpu.state->fxsave.swd; 550 } else { 551 return (unsigned short)tsk->thread.fpu.state->fsave.swd; 552 } 553} 554 555static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk) 556{ 557 if (cpu_has_xmm) { 558 return tsk->thread.fpu.state->fxsave.mxcsr; 559 } else { 560 return MXCSR_DEFAULT; 561 } 562} 563 564static bool fpu_allocated(struct fpu *fpu) 565{ 566 return fpu->state != NULL; 567} 568 569static inline int fpu_alloc(struct fpu *fpu) 570{ 571 if (fpu_allocated(fpu)) 572 return 0; 573 fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL); 574 if (!fpu->state) 575 return -ENOMEM; 576 WARN_ON((unsigned long)fpu->state & 15); 577 return 0; 578} 579 580static inline void fpu_free(struct fpu *fpu) 581{ 582 if (fpu->state) { 583 kmem_cache_free(task_xstate_cachep, fpu->state); 584 fpu->state = NULL; 585 } 586} 587 588static inline void fpu_copy(struct task_struct *dst, struct task_struct *src) 589{ 590 if (use_eager_fpu()) { 591 memset(&dst->thread.fpu.state->xsave, 0, xstate_size); 592 __save_fpu(dst); 593 } else { 594 struct fpu *dfpu = &dst->thread.fpu; 595 struct fpu *sfpu = &src->thread.fpu; 596 597 unlazy_fpu(src); 598 memcpy(dfpu->state, sfpu->state, xstate_size); 599 } 600} 601 602static inline unsigned long 603alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx, 604 unsigned long *size) 
605{ 606 unsigned long frame_size = xstate_sigframe_size(); 607 608 *buf_fx = sp = round_down(sp - frame_size, 64); 609 if (ia32_frame && use_fxsr()) { 610 frame_size += sizeof(struct i387_fsave_struct); 611 sp -= sizeof(struct i387_fsave_struct); 612 } 613 614 *size = frame_size; 615 return sp; 616} 617 618#endif 619