process_32.c revision 0723a69a63beec1ca6e792239ef75d0181387ef0
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#include <asm/vm86.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/kdebug.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

static int hlt_counter;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return ((unsigned long *)tsk->thread.sp)[3];
}

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

void disable_hlt(void)
{
	hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);
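/*
 * Callers that cannot tolerate HLT bracket the critical region with
 * the helpers above. A sketch of hypothetical driver code, not part
 * of this file:
 *
 *	disable_hlt();
 *	... poke hardware that must not race with HLT ...
 *	enable_hlt();
 *
 * The counter nests, so every disable_hlt() must be balanced by an
 * enable_hlt().
 */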
/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		local_irq_disable();
		if (!need_resched()) {
			ktime_t t0, t1;
			u64 t0n, t1n;

			t0 = ktime_get();
			t0n = ktime_to_ns(t0);
			safe_halt();	/* enables interrupts racelessly */
			local_irq_disable();
			t1 = ktime_get();
			t1n = ktime_to_ns(t1);
			sched_clock_idle_wakeup_event(t1n - t0n);
		}
		local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
	} else {
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	cpu_relax();
}

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	/* This must be done before dead CPU ack */
	cpu_exit_clear();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {
			void (*idle)(void);

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			check_pgt_cache();
			rmb();
			idle = pm_idle;

			if (rcu_pending(cpu))
				rcu_check_callbacks(cpu, 0);

			if (!idle)
				idle = default_idle;

			if (cpu_is_offline(cpu))
				play_dead();

			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			idle();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

static void do_nothing(void *unused)
{
}

void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map, tmp = current->cpus_allowed;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
		/*
		 * We waited 1 sec; if a CPU still did not call idle,
		 * it may be because it is in idle and not waking up
		 * because it has nothing to do.
		 * Give all the remaining CPUs a kick.
		 */
		smp_call_function_mask(map, do_nothing, 0, 0);
	} while (!cpus_empty(map));

	set_cpus_allowed(current, tmp);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

/*
 * This uses the new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate the IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter the optimized wait
 * state through MWAIT. Whenever someone changes need_resched, we will be
 * woken up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
	if (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(ax, cx);
	}
}

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	local_irq_enable();
	mwait_idle_with_hints(0, 0);
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_MWAIT)) {
		printk("monitor/mwait feature present.\n");
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => all CPUs support mwait
		 */
		if (!pm_idle) {
			printk("using mwait in idle threads.\n");
			pm_idle = mwait_idle;
		}
	}
}

static int __init idle_setup(char *str)
{
	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
#ifdef CONFIG_X86_SMP
		if (smp_num_siblings > 1)
			printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
#endif
	} else if (!strcmp(str, "mwait"))
		force_mwait = 1;
	else
		return -1;

	boot_option_idle_override = 1;
	return 0;
}
early_param("idle", idle_setup);
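/*
 * Both overrides are wired to the "idle=" kernel command line
 * parameter, e.g.:
 *
 *	idle=poll	- busy-wait in poll_idle() instead of halting
 *	idle=mwait	- force the MWAIT-based idle routine
 *
 * Either form sets boot_option_idle_override, so later idle-selection
 * code can tell that the user chose a routine explicitly.
 */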
void __show_registers(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;

	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
		savesegment(gs, gs);
	} else {
		sp = (unsigned long) (&regs->sp);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	printk("\n");
	printk("Pid: %d, comm: %s %s (%s %.*s)\n",
		task_pid_nr(current), current->comm,
		print_tainted(), init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
		0xffff & regs->cs, regs->ip, regs->flags,
		smp_processor_id());
	print_symbol("EIP is at %s\n", regs->ip);

	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
		regs->ds & 0xffff, regs->es & 0xffff,
		regs->fs & 0xffff, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
		cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
		d0, d1, d2, d3);

	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk("DR6: %08lx DR7: %08lx\n",
		d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	__show_registers(regs, 1);
	show_trace(NULL, regs, &regs->sp, regs->bp);
}

/*
 * This gets run with %bx containing the
 * function to call, and %dx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.bx = (unsigned long) fn;
	regs.dx = (unsigned long) arg;

	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
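/*
 * Typical use from elsewhere in the kernel (a sketch; the worker
 * function is hypothetical):
 *
 *	static int my_worker(void *data)
 *	{
 *		do_useful_work();	- runs in its own kernel thread
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);
 *
 * The new thread begins in kernel_thread_helper(), which calls fn(arg)
 * and passes the return value to do_exit().
 */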
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* The process may have allocated an io port bitmap... nuke it. */
	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
		struct task_struct *tsk = current;
		struct thread_struct *t = &tsk->thread;
		int cpu = get_cpu();
		struct tss_struct *tss = &per_cpu(init_tss, cpu);

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
		t->io_bitmap_max = 0;
		tss->io_bitmap_owner = NULL;
		tss->io_bitmap_max = 0;
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		put_cpu();
	}
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	clear_tsk_thread_flag(tsk, TIF_DEBUG);
	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
	unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->ax = 0;
	childregs->sp = sp;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);

	p->thread.ip = (unsigned long) ret_from_fork;

	savesegment(gs, p->thread.gs);

	tsk = current;
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)childregs->si, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
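/*
 * The child's register frame set up above is what makes fork() return
 * twice: childregs->ax is zeroed, so when the child is first scheduled
 * it resumes at ret_from_fork and returns 0 to user space, while the
 * parent gets the child's pid as do_fork()'s return value.
 */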
/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs *regs, struct user *dump)
{
	u16 gs;

/* changed the size calculations - should hopefully work better. lbt */
	dump->magic = CMAGIC;
	dump->start_code = 0;
	dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;
	dump->u_debugreg[0] = current->thread.debugreg0;
	dump->u_debugreg[1] = current->thread.debugreg1;
	dump->u_debugreg[2] = current->thread.debugreg2;
	dump->u_debugreg[3] = current->thread.debugreg3;
	dump->u_debugreg[4] = 0;
	dump->u_debugreg[5] = 0;
	dump->u_debugreg[6] = current->thread.debugreg6;
	dump->u_debugreg[7] = current->thread.debugreg7;

	if (dump->start_stack < TASK_SIZE)
		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

	dump->regs.bx = regs->bx;
	dump->regs.cx = regs->cx;
	dump->regs.dx = regs->dx;
	dump->regs.si = regs->si;
	dump->regs.di = regs->di;
	dump->regs.bp = regs->bp;
	dump->regs.ax = regs->ax;
	dump->regs.ds = (u16)regs->ds;
	dump->regs.es = (u16)regs->es;
	dump->regs.fs = (u16)regs->fs;
	savesegment(gs, gs);
	dump->regs.orig_ax = regs->orig_ax;
	dump->regs.ip = regs->ip;
	dump->regs.cs = (u16)regs->cs;
	dump->regs.flags = regs->flags;
	dump->regs.sp = regs->sp;
	dump->regs.ss = (u16)regs->ss;

	dump->u_fpvalid = dump_fpu(regs, &dump->i387);
}
EXPORT_SYMBOL(dump_thread);
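/*
 * Note that u_tsize/u_dsize/u_ssize above are in pages (hence the
 * PAGE_SHIFT shifts), matching what the a.out "struct user" core-dump
 * format expects; the debug registers are copied so a debugger reading
 * the core can see any hardware breakpoints that were armed.
 */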
#ifdef CONFIG_SECCOMP
static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}
void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}
static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}
#endif /* CONFIG_SECCOMP */

static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		 struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long debugctl;

	prev = &prev_p->thread;
	next = &next_p->thread;

	debugctl = prev->debugctlmsr;
	if (next->ds_area_msr != prev->ds_area_msr) {
		/* we clear debugctl to make sure DS
		 * is not in use when we change it */
		debugctl = 0;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
		wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
	}

	if (next->debugctlmsr != debugctl)
		wrmsr(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr, 0);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		set_debugreg(next->debugreg0, 0);
		set_debugreg(next->debugreg1, 1);
		set_debugreg(next->debugreg2, 2);
		set_debugreg(next->debugreg3, 3);
		/* no 4 and 5 */
		set_debugreg(next->debugreg6, 6);
		set_debugreg(next->debugreg7, 7);
	}

#ifdef CONFIG_SECCOMP
	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}
#endif

	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);


	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Disable the bitmap via an invalid offset. We still cache
		 * the previous bitmap owner and the IO bitmap contents:
		 */
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		return;
	}

	if (likely(next == tss->io_bitmap_owner)) {
		/*
		 * The previous owner of the bitmap (hence the bitmap content)
		 * matches the next task; we don't have to do anything but
		 * set a valid offset in the TSS:
		 */
		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
		return;
	}
	/*
	 * Lazy TSS I/O bitmap copy. We set an invalid offset here and
	 * let the task get a GPF in case an I/O instruction is performed.
	 * The handler of the GPF will verify that the faulting task has a
	 * valid I/O bitmap and, if true, does the real copy and restarts
	 * the instruction. This saves us redundant copies when the
	 * currently switched task does not perform any I/O during its
	 * timeslice.
	 */
	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}
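/*
 * To summarize the three io_bitmap_base states set above:
 *
 *	IO_BITMAP_OFFSET		- the TSS copy is valid for the
 *					  incoming task; I/O is checked
 *					  against the bitmap
 *	INVALID_IO_BITMAP_OFFSET	- the task has no bitmap; any
 *					  unprivileged I/O instruction
 *					  faults
 *	INVALID_IO_BITMAP_OFFSET_LAZY	- the task has a bitmap, but the
 *					  TSS copy is stale; the #GP
 *					  handler copies it on demand
 */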
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	__unlazy_fpu(prev_p);


	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(&next->i387.fxsave);

	/*
	 * Reload esp0.
	 */
	load_sp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry. No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel. Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs. This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	savesegment(gs, prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed. In normal use, the flags restore
	 * in the switch assembly will handle this. But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/* If the task has used the FPU in the last 5 timeslices, just do a
	 * full restore of the math state immediately to avoid the trap; the
	 * chances of needing the FPU soon are obviously high now
	 */
	if (next_p->fpu_counter > 5)
		math_state_restore();

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		loadsegment(gs, next->gs);

	x86_write_percpu(current_task, next_p);

	return prev_p;
}
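/*
 * The "prev->gs | next->gs" test above is a deliberate bitwise OR:
 * %gs only needs reloading if either task's selector is non-null, and
 * a single OR is cheaper than two separate compare-and-branch tests
 * in this hot path.
 */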
asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.bx;
	newsp = regs.cx;
	parent_tidptr = (int __user *)regs.dx;
	child_tidptr = (int __user *)regs.di;
	if (!newsp)
		newsp = regs.sp;
	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) regs.bx);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			(char __user * __user *) regs.cx,
			(char __user * __user *) regs.dx,
			&regs);
	if (error == 0) {
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
	putname(filename);
out:
	return error;
}

#define top_esp		(THREAD_SIZE - sizeof(unsigned long))
#define top_ebp		(THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long bp, sp, ip;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	sp = p->thread.sp;
	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
		return 0;
	/* include/asm-i386/system.h:switch_to() pushes bp last. */
	bp = *(unsigned long *) sp;
	do {
		if (bp < stack_page || bp > top_ebp+stack_page)
			return 0;
		ip = *(unsigned long *) (bp+4);
		if (!in_sched_functions(ip))
			return ip;
		bp = *(unsigned long *) bp;
	} while (count++ < 16);
	return 0;
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}
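/*
 * Worked example for the randomization above: arch_align_stack()
 * subtracts up to 8191 bytes (get_random_int() % 8192) and then rounds
 * down with "& ~0xf", so the starting stack keeps 16-byte alignment
 * with roughly 512 distinct positions. arch_randomize_brk() places the
 * heap start somewhere in the 32MB (0x02000000 byte) window above the
 * ELF brk, falling back to mm->brk itself if randomize_range() fails.
 */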