fpu-internal.h revision 31d963389f67165402aa447a8e8ce5ffb9188b3d
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H

#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>
#include <asm/smap.h>

#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
struct ksignal;
int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
			compat_sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct ksignal *ksig,
		     compat_sigset_t *set, struct pt_regs *regs);
#else
# define user_i387_ia32_struct	user_i387_struct
# define user32_fxsr_struct	user_fxsr_struct
# define ia32_setup_frame	__setup_frame
# define ia32_setup_rt_frame	__setup_rt_frame
#endif

extern unsigned int mxcsr_feature_mask;
extern void fpu_init(void);
extern void eager_fpu_init(void);

DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);

extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
			      struct task_struct *tsk);
extern void convert_to_fxsr(struct task_struct *tsk,
			    const struct user_i387_ia32_struct *env);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
			   xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
			   xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

static inline int is_ia32_compat_frame(void)
{
	return config_enabled(CONFIG_IA32_EMULATION) &&
	       test_thread_flag(TIF_IA32);
}

static inline int is_ia32_frame(void)
{
	return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
}

static inline int is_x32_frame(void)
{
	return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
}

#define X87_FSW_ES (1 << 7)	/* Exception Summary */

static __always_inline __pure bool use_eager_fpu(void)
{
	return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
}

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has_safe(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has_safe(X86_FEATURE_FXSR);
}

static inline void fx_finit(struct i387_fxsave_struct *fx)
{
	memset(fx, 0, xstate_size);
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}

extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}

#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})
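/*
 * Illustrative note (not part of the original header): for a concrete
 * instruction, user_insn() expands to an exception-table-protected asm
 * statement. E.g. user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx))
 * becomes, roughly:
 *
 *	int err;
 *	asm volatile(ASM_STAC "\n"
 *		     "1: fnsave %[fx]; fwait\n\t"
 *		     "2: " ASM_CLAC "\n"
 *		     ".section .fixup,\"ax\"\n"
 *		     "3: movl $-1,%[err]\n"
 *		     "   jmp 2b\n"
 *		     ".previous\n"
 *		     _ASM_EXTABLE(1b, 3b)
 *		     : [err] "=r" (err), [fx] "=m" (*fx)
 *		     : "0"(0), "m" (*fx));
 *
 * A fault at label 1 is redirected via the exception table to label 3,
 * which stores -1 in err and jumps back; on a clean run err stays 0
 * (the "0"(0) input ties err's initial value to zero). check_insn() is
 * identical minus the STAC/CLAC bracketing, for kernel-space operands.
 */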
static inline int fsave_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int frstor_checking(struct i387_fsave_struct *fx)
{
	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int frstor_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
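/*
 * Illustrative sketch (not part of the original header): a typical caller
 * of the *_user() helpers above, using a hypothetical function name. The
 * access_ok() check and the -EFAULT convention mirror how the signal code
 * uses these primitives; the helpers themselves only report success (0)
 * or failure (-1) via the exception-table fixup path.
 */
static inline int example_fxsave_to_user(struct i387_fxsave_struct __user *buf)
{
	if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
		return -EFAULT;

	return fxsave_user(buf) ? -EFAULT : 0;
}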
static inline void fpu_fxsave(struct fpu *fpu)
{
	if (config_enabled(CONFIG_X86_32))
		asm volatile("fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a
		 * second REX prefix will be generated (to the assembler,
		 * rex64 followed by semicolon is a separate instruction),
		 * and hence the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix
		 * submitted to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
		 *
		 * This, however, we can work around by forcing the compiler
		 * to select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile("rex64/fxsave (%[fx])"
			     : "=m" (fpu->state->fxsave)
			     : [fx] "R" (&fpu->state->fxsave));
	}
}

/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact.
 */
static inline int fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * xsave header may indicate the init state of the FP.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return 1;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return 0;
	}

	/*
	 * If exceptions are pending, we need to clear them so
	 * that we don't randomly get exceptions later.
	 *
	 * FIXME! Is this perhaps only true for the old-style
	 * irq13 case? Maybe we could leave the x87 state
	 * intact otherwise?
	 */
	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
		asm volatile("fnclex");
		return 0;
	}
	return 1;
}

static inline int __save_init_fpu(struct task_struct *tsk)
{
	return fpu_save_init(&tsk->thread.fpu);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(&fpu->state->xsave);
	else if (use_fxsr())
		return fxrstor_checking(&fpu->state->fxsave);
	else
		return frstor_checking(&fpu->state->fsave);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an
	 * exception is pending. Clear the x87 state here by setting it
	 * to fixed values. "m" is a random variable that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (tsk->thread.fpu.has_fpu));
	}

	return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Software FPU state helpers. Careful: these need to
 * be preemption protected *and* they need to be
 * properly paired with the CR0.TS changes!
 */
static inline int __thread_has_fpu(struct task_struct *tsk)
{
	return tsk->thread.fpu.has_fpu;
}

/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 0;
	this_cpu_write(fpu_owner_task, NULL);
}

/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 1;
	this_cpu_write(fpu_owner_task, tsk);
}
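/*
 * Illustrative note (not part of the original header): the return value
 * of fpu_save_init()/__save_init_fpu() above feeds the lazy-restore
 * machinery below. A non-zero return means the save left the register
 * contents intact, so switching back to this task on the same CPU may
 * skip the restore; a zero return (fnsave clobbers the registers, or
 * fnclex had to clear pending exceptions) makes switch_fpu_prepare()
 * below invalidate last_cpu (~0) so no such shortcut is taken.
 */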
/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * so try to avoid using them on their own.
 */
static inline void __thread_fpu_end(struct task_struct *tsk)
{
	__thread_clear_has_fpu(tsk);
	if (!use_eager_fpu())
		stts();
}

static inline void __thread_fpu_begin(struct task_struct *tsk)
{
	if (!use_eager_fpu())
		clts();
	__thread_set_has_fpu(tsk);
}

static inline void __drop_fpu(struct task_struct *tsk)
{
	if (__thread_has_fpu(tsk)) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		__thread_fpu_end(tsk);
	}
}

static inline void drop_fpu(struct task_struct *tsk)
{
	/*
	 * Forget coprocessor state..
	 */
	preempt_disable();
	tsk->thread.fpu_counter = 0;
	__drop_fpu(tsk);
	clear_used_math();
	preempt_enable();
}

static inline void drop_init_fpu(struct task_struct *tsk)
{
	if (!use_eager_fpu())
		drop_fpu(tsk);
	else {
		if (use_xsave())
			xrstor_state(init_xstate_buf, -1);
		else
			fxrstor_checking(&init_xstate_buf->i387);
	}
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

/*
 * Must be run with preemption disabled: this clears the fpu_owner_task
 * on this CPU.
 *
 * This will disable any lazy FPU state restore of the current FPU state,
 * but if the current thread owns the FPU, its state will still be saved
 * at the next context switch.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_owner_task, cpu) = NULL;
}

static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	return new == this_cpu_read_stable(fpu_owner_task) &&
		cpu == new->thread.fpu.last_cpu;
}

static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * If the task has used the math, pre-load the FPU: always on
	 * eager-FPU processors, otherwise only when the past 5
	 * consecutive context switches used math.
	 */
	fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
					     new->thread.fpu_counter > 5);
	if (__thread_has_fpu(old)) {
		if (!__save_init_fpu(old))
			cpu = ~0;
		old->thread.fpu.last_cpu = cpu;
		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new->thread.fpu_counter++;
			__thread_set_has_fpu(new);
			prefetch(new->thread.fpu.state);
		} else if (!use_eager_fpu())
			stts();
	} else {
		old->thread.fpu_counter = 0;
		old->thread.fpu.last_cpu = ~0;
		if (fpu.preload) {
			new->thread.fpu_counter++;
			if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
			__thread_fpu_begin(new);
		}
	}
	return fpu;
}

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
	if (fpu.preload) {
		if (unlikely(restore_fpu_checking(new)))
			drop_init_fpu(new);
	}
}
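/*
 * Illustrative sketch (not part of the original header): how the two
 * halves above are meant to be used from a context switch, modelled on
 * the __switch_to() code in arch/x86/kernel/process_{32,64}.c. The
 * local names are hypothetical.
 */
#if 0	/* example only */
	fpu_switch_t fpu;

	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
	/* ... switch stacks, TLS, I/O bitmap, etc. ... */
	switch_fpu_finish(next_p, fpu);
#endif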
/*
 * Signal frame handlers...
 */
extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);

static inline int xstate_sigframe_size(void)
{
	return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
}

static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
{
	void __user *buf_fx = buf;
	int size = xstate_sigframe_size();

	if (ia32_frame && use_fxsr()) {
		buf_fx = buf + sizeof(struct i387_fsave_struct);
		size += sizeof(struct i387_fsave_struct);
	}

	return __restore_xstate_sig(buf, buf_fx, size);
}

/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the FPU state. It does not do any save/restore on its own.
 */
static inline void user_fpu_begin(void)
{
	preempt_disable();
	if (!user_has_fpu())
		__thread_fpu_begin(current);
	preempt_enable();
}

static inline void __save_fpu(struct task_struct *tsk)
{
	if (use_xsave()) {
		if (unlikely(system_state == SYSTEM_BOOTING))
			xsave_state_booting(&tsk->thread.fpu.state->xsave, -1);
		else
			xsave_state(&tsk->thread.fpu.state->xsave, -1);
	} else
		fpu_fxsave(&tsk->thread.fpu);
}

/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	WARN_ON_ONCE(!__thread_has_fpu(tsk));

	if (use_eager_fpu()) {
		__save_fpu(tsk);
		return;
	}

	preempt_disable();
	__save_init_fpu(tsk);
	__thread_fpu_end(tsk);
	preempt_enable();
}

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

static bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}

static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
{
	if (use_eager_fpu()) {
		memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
		__save_fpu(dst);
	} else {
		struct fpu *dfpu = &dst->thread.fpu;
		struct fpu *sfpu = &src->thread.fpu;

		unlazy_fpu(src);
		memcpy(dfpu->state, sfpu->state, xstate_size);
	}
}
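/*
 * Illustrative layout note (not part of the original header): for an
 * ia32 frame on an FXSR-capable CPU, restore_xstate_sig() above and
 * alloc_mathframe() below agree on a math frame laid out as:
 *
 *	buf    -> struct i387_fsave_struct	(legacy fsave image, ia32 only)
 *	buf_fx -> fxsave/xsave image		(64-byte aligned)
 *		  FP_XSTATE_MAGIC2		(only when xsave is in use)
 *
 * On a native 64-bit frame, buf == buf_fx and the fsave prefix is absent.
 */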
static inline unsigned long
alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
		unsigned long *size)
{
	unsigned long frame_size = xstate_sigframe_size();

	*buf_fx = sp = round_down(sp - frame_size, 64);
	if (ia32_frame && use_fxsr()) {
		frame_size += sizeof(struct i387_fsave_struct);
		sp -= sizeof(struct i387_fsave_struct);
	}

	*size = frame_size;
	return sp;
}

#endif