process_32.c revision 5c79d2a517a9905599d192db8ce77ab5f1a2faca
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <stdarg.h>

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/dmi.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/ds.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return ((unsigned long *)tsk->thread.sp)[3];
}

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			check_pgt_cache();
			rmb();

			if (rcu_pending(cpu))
				rcu_check_callbacks(cpu, 0);

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
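/*
 * For reference, a simplified sketch of what the boot_init_stack_canary()
 * call above amounts to -- the real implementation lives in
 * <asm/stackprotector.h>; this is not verbatim:
 *
 *	u64 canary, tsc;
 *
 *	get_random_bytes(&canary, sizeof(canary));
 *	tsc = native_read_tsc();
 *	canary += tsc + (tsc << 32UL);
 *
 *	current->stack_canary = canary;
 *	percpu_write(stack_canary, canary);
 */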
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;
	const char *board;

	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
		gs = get_user_gs(regs);
	} else {
		sp = (unsigned long) (&regs->sp);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	printk("\n");

	board = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!board)
		board = "";
	printk("Pid: %d, comm: %s %s (%s %.*s) %s\n",
			task_pid_nr(current), current->comm,
			print_tainted(), init_utsname()->release,
			(int)strcspn(init_utsname()->version, " "),
			init_utsname()->version, board);

	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
			(u16)regs->cs, regs->ip, regs->flags,
			smp_processor_id());
	print_symbol("EIP is at %s\n", regs->ip);

	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
		(u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
			cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
			d0, d1, d2, d3);

	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk("DR6: %08lx DR7: %08lx\n",
			d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs, 1);
	show_trace(NULL, regs, &regs->sp, regs->bp);
}

/*
 * This gets run with %bx containing the
 * function to call, and %dx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.bx = (unsigned long) fn;
	regs.dx = (unsigned long) arg;

	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.gs = __KERNEL_STACK_CANARY;
	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

	/* OK, create the new process. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
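/*
 * Illustrative kernel_thread() usage -- a minimal sketch; my_worker,
 * my_data and do_useful_work are made-up names, not part of this file:
 *
 *	static int my_worker(void *arg)
 *	{
 *		do_useful_work(arg);
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_worker, &my_data, CLONE_FS | CLONE_FILES);
 *
 * The new task starts in kernel_thread_helper(), which picks the
 * function and argument out of %bx and %dx as set up above and calls
 * my_worker(&my_data); CLONE_VM is forced, so the thread never gets a
 * user address space of its own.
 */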
/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	/* The process may have allocated an io port bitmap... nuke it. */
	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
		struct task_struct *tsk = current;
		struct thread_struct *t = &tsk->thread;
		int cpu = get_cpu();
		struct tss_struct *tss = &per_cpu(init_tss, cpu);

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
		t->io_bitmap_max = 0;
		tss->io_bitmap_owner = NULL;
		tss->io_bitmap_max = 0;
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		put_cpu();
	}

	ds_exit_thread(current);
}
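/*
 * For reference: in the TSS I/O permission bitmap a clear bit grants
 * user access to the corresponding port and a set bit denies it, which
 * is why exit_thread() wipes the cached copy with 0xff (all denied).
 * A task typically acquires such a bitmap via ioperm(2), e.g.
 * (illustrative): ioperm(0x378, 3, 1) to allow ports 0x378-0x37a.
 */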
void flush_thread(void)
{
	struct task_struct *tsk = current;

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	clear_tsk_thread_flag(tsk, TIF_DEBUG);
	/*
	 * Forget coprocessor state.
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
	unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->ax = 0;
	childregs->sp = sp;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);

	p->thread.ip = (unsigned long) ret_from_fork;

	task_user_gs(p) = get_user_gs(regs);

	tsk = current;
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)childregs->si, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	ds_copy_thread(p, current);

	clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR);
	p->thread.debugctlmsr = 0;

	return err;
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	set_user_gs(regs, 0);
	regs->fs = 0;
	set_fs(USER_DS);
	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;
	regs->ip = new_ip;
	regs->sp = new_sp;
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);

static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
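/*
 * get_tsc_mode()/set_tsc_mode() back the PR_GET_TSC/PR_SET_TSC prctls.
 * Illustrative user-space usage (sketch):
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	- rdtsc now raises SIGSEGV
 *	prctl(PR_SET_TSC, PR_TSC_ENABLE);	- rdtsc is allowed again
 */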
static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		 struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
	    test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
		ds_switch_to(prev_p, next_p);
	else if (next->debugctlmsr != prev->debugctlmsr)
		update_debugctlmsr(next->debugctlmsr);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		set_debugreg(next->debugreg0, 0);
		set_debugreg(next->debugreg1, 1);
		set_debugreg(next->debugreg2, 2);
		set_debugreg(next->debugreg3, 3);
		/* no 4 and 5 */
		set_debugreg(next->debugreg6, 6);
		set_debugreg(next->debugreg7, 7);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Disable the bitmap via an invalid offset. We still cache
		 * the previous bitmap owner and the IO bitmap contents:
		 */
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		return;
	}

	if (likely(next == tss->io_bitmap_owner)) {
		/*
		 * Previous owner of the bitmap (hence the bitmap content)
		 * matches the next task, we don't have to do anything but
		 * set a valid offset in the TSS:
		 */
		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
		return;
	}
	/*
	 * Lazy TSS I/O bitmap copy. We set an invalid offset here and
	 * let the task get a GPF in case an I/O instruction is
	 * performed. The handler of the GPF will verify that the
	 * faulting task has a valid I/O bitmap and, if true, does the
	 * real copy and restarts the instruction. This will save us
	 * redundant copies when the currently switched task does not
	 * perform any I/O during its timeslice.
	 */
	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}
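/*
 * Summary of the three io_bitmap_base states set above:
 *
 *	INVALID_IO_BITMAP_OFFSET	next has no bitmap; any I/O
 *					instruction at CPL 3 faults
 *	IO_BITMAP_OFFSET		the cached bitmap already belongs
 *					to next; just point the TSS at it
 *	INVALID_IO_BITMAP_OFFSET_LAZY	a bitmap exists but the cached
 *					copy is stale; the #GP handler
 *					copies it on first use
 */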
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is something of a red herring - this code is not
 * noticeably faster. However, there _is_ some room for improvement
 * here, so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	__unlazy_fpu(prev_p);

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(next->xstate);

	/*
	 * Reload esp0.
	 */
	load_sp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry. No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel. Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs. This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	lazy_save_gs(prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed. In normal use, the flags restore
	 * in the switch assembly will handle this. But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/*
	 * If the task has used the FPU during the last 5 timeslices,
	 * just do a full restore of the math state immediately to
	 * avoid the trap; the chances of needing the FPU soon are
	 * obviously high now.
	 *
	 * tsk_used_math() checks prevent calling math_state_restore(),
	 * which can sleep in the case of !tsk_used_math()
	 */
	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
		math_state_restore();

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		lazy_load_gs(next->gs);

	percpu_write(current_task, next_p);

	return prev_p;
}

asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.bx;
	newsp = regs.cx;
	parent_tidptr = (int __user *)regs.dx;
	child_tidptr = (int __user *)regs.di;
	if (!newsp)
		newsp = regs.sp;
	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}
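/*
 * Register convention for the raw i386 clone() entry, as read above:
 *
 *	%ebx	clone_flags
 *	%ecx	new stack pointer (0 means "reuse the parent's")
 *	%edx	parent_tidptr
 *	%edi	child_tidptr
 *
 * %esi carries the struct user_desc pointer that copy_thread()
 * consumes when CLONE_SETTLS is set (it reads childregs->si).
 */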
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) regs.bx);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			(char __user * __user *) regs.cx,
			(char __user * __user *) regs.dx,
			&regs);
	if (error == 0) {
		/* Make sure we don't return using sysenter. */
		set_thread_flag(TIF_IRET);
	}
	putname(filename);
out:
	return error;
}

#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long bp, sp, ip;
	unsigned long stack_page;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	sp = p->thread.sp;
	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
		return 0;
	/* include/asm-i386/system.h:switch_to() pushes bp last. */
	bp = *(unsigned long *) sp;
	do {
		if (bp < stack_page || bp > top_ebp+stack_page)
			return 0;
		/* the return address sits just above the saved %ebp */
		ip = *(unsigned long *) (bp+4);
		if (!in_sched_functions(ip))
			return ip;
		/* follow the frame-pointer chain one level up */
		bp = *(unsigned long *) bp;
	} while (count++ < 16);
	return 0;
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}
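/*
 * Note on the two randomizations above (numbers illustrative):
 * arch_align_stack() lowers the initial user %esp by 0-8191 bytes and
 * then 16-byte aligns it, e.g. 0xbffff000 - 0x1388 = 0xbfffdc78, masked
 * to 0xbfffdc70. arch_randomize_brk() places the heap base at a random
 * page-aligned address inside [mm->brk, mm->brk + 32 MB).
 */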