process_32.c revision a4928cffe6435caf427ae673131a633c1329dbf3
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>
#include <linux/prctl.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/kdebug.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

static int hlt_counter;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return ((unsigned long *)tsk->thread.sp)[3];
}

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

void disable_hlt(void)
{
	hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		local_irq_disable();
		if (!need_resched()) {
			safe_halt();	/* enables interrupts racelessly */
			local_irq_disable();
		}
		local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
	} else {
		local_irq_enable();
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	local_irq_enable();
	cpu_relax();
}

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	/* This must be done before dead CPU ack */
	cpu_exit_clear();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {
			void (*idle)(void);

			check_pgt_cache();
			rmb();
			idle = pm_idle;

			if (rcu_pending(cpu))
				rcu_check_callbacks(cpu, 0);

			if (!idle)
				idle = default_idle;

			if (cpu_is_offline(cpu))
				play_dead();

			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			idle();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 0, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
	if (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__sti_mwait(ax, cx);
		else
			local_irq_enable();
	} else
		local_irq_enable();
}

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	local_irq_enable();
	mwait_idle_with_hints(0, 0);
}

static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
	if (force_mwait)
		return 1;
	/* Any C1 states supported? */
	return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	static int selected;

	if (selected)
		return;
#ifdef CONFIG_X86_SMP
	if (pm_idle == poll_idle && smp_num_siblings > 1) {
		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
			" performance may degrade.\n");
	}
#endif
	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => All CPUs support mwait
		 */
		if (!pm_idle) {
			printk(KERN_INFO "using mwait in idle threads.\n");
			pm_idle = mwait_idle;
		}
	}
	selected = 1;
}

static int __init idle_setup(char *str)
{
	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	} else if (!strcmp(str, "mwait"))
		force_mwait = 1;
	else
		return -1;

	boot_option_idle_override = 1;
	return 0;
}
early_param("idle", idle_setup);

void __show_registers(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;

	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
		savesegment(gs, gs);
	} else {
		sp = (unsigned long) (&regs->sp);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	printk("\n");
	printk("Pid: %d, comm: %s %s (%s %.*s)\n",
			task_pid_nr(current), current->comm,
			print_tainted(), init_utsname()->release,
			(int)strcspn(init_utsname()->version, " "),
			init_utsname()->version);

	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
			(u16)regs->cs, regs->ip, regs->flags,
			smp_processor_id());
	print_symbol("EIP is at %s\n", regs->ip);

	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
			cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
			d0, d1, d2, d3);

	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk("DR6: %08lx DR7: %08lx\n",
			d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	__show_registers(regs, 1);
	show_trace(NULL, regs, &regs->sp, regs->bp);
}

/*
 * This gets run with %bx containing the
 * function to call, and %dx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.bx = (unsigned long) fn;
	regs.dx = (unsigned long) arg;

	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* The process may have allocated an io port bitmap... nuke it. */
	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
		struct task_struct *tsk = current;
		struct thread_struct *t = &tsk->thread;
		int cpu = get_cpu();
		struct tss_struct *tss = &per_cpu(init_tss, cpu);

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
		t->io_bitmap_max = 0;
		tss->io_bitmap_owner = NULL;
		tss->io_bitmap_max = 0;
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		put_cpu();
	}
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	clear_tsk_thread_flag(tsk, TIF_DEBUG);
	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
	unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->ax = 0;
	childregs->sp = sp;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);

	p->thread.ip = (unsigned long) ret_from_fork;

	savesegment(gs, p->thread.gs);

	tsk = current;
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)childregs->si, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	__asm__("movl %0, %%gs" :: "r"(0));
	regs->fs = 0;
	set_fs(USER_DS);
	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;
	regs->ip = new_ip;
	regs->sp = new_sp;
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);

static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
543 */ 544 hard_disable_TSC(); 545 preempt_enable(); 546} 547 548static void hard_enable_TSC(void) 549{ 550 write_cr4(read_cr4() & ~X86_CR4_TSD); 551} 552 553static void enable_TSC(void) 554{ 555 preempt_disable(); 556 if (test_and_clear_thread_flag(TIF_NOTSC)) 557 /* 558 * Must flip the CPU state synchronously with 559 * TIF_NOTSC in the current running context. 560 */ 561 hard_enable_TSC(); 562 preempt_enable(); 563} 564 565int get_tsc_mode(unsigned long adr) 566{ 567 unsigned int val; 568 569 if (test_thread_flag(TIF_NOTSC)) 570 val = PR_TSC_SIGSEGV; 571 else 572 val = PR_TSC_ENABLE; 573 574 return put_user(val, (unsigned int __user *)adr); 575} 576 577int set_tsc_mode(unsigned int val) 578{ 579 if (val == PR_TSC_SIGSEGV) 580 disable_TSC(); 581 else if (val == PR_TSC_ENABLE) 582 enable_TSC(); 583 else 584 return -EINVAL; 585 586 return 0; 587} 588 589static noinline void 590__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, 591 struct tss_struct *tss) 592{ 593 struct thread_struct *prev, *next; 594 unsigned long debugctl; 595 596 prev = &prev_p->thread; 597 next = &next_p->thread; 598 599 debugctl = prev->debugctlmsr; 600 if (next->ds_area_msr != prev->ds_area_msr) { 601 /* we clear debugctl to make sure DS 602 * is not in use when we change it */ 603 debugctl = 0; 604 update_debugctlmsr(0); 605 wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0); 606 } 607 608 if (next->debugctlmsr != debugctl) 609 update_debugctlmsr(next->debugctlmsr); 610 611 if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { 612 set_debugreg(next->debugreg0, 0); 613 set_debugreg(next->debugreg1, 1); 614 set_debugreg(next->debugreg2, 2); 615 set_debugreg(next->debugreg3, 3); 616 /* no 4 and 5 */ 617 set_debugreg(next->debugreg6, 6); 618 set_debugreg(next->debugreg7, 7); 619 } 620 621 if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ 622 test_tsk_thread_flag(next_p, TIF_NOTSC)) { 623 /* prev and next are different */ 624 if (test_tsk_thread_flag(next_p, TIF_NOTSC)) 625 hard_disable_TSC(); 626 else 627 hard_enable_TSC(); 628 } 629 630#ifdef X86_BTS 631 if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS)) 632 ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS); 633 634 if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS)) 635 ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES); 636#endif 637 638 639 if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { 640 /* 641 * Disable the bitmap via an invalid offset. We still cache 642 * the previous bitmap owner and the IO bitmap contents: 643 */ 644 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; 645 return; 646 } 647 648 if (likely(next == tss->io_bitmap_owner)) { 649 /* 650 * Previous owner of the bitmap (hence the bitmap content) 651 * matches the next task, we dont have to do anything but 652 * to set a valid offset in the TSS: 653 */ 654 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; 655 return; 656 } 657 /* 658 * Lazy TSS's I/O bitmap copy. We set an invalid offset here 659 * and we let the task to get a GPF in case an I/O instruction 660 * is performed. The handler of the GPF will verify that the 661 * faulting task has a valid I/O bitmap and, it true, does the 662 * real copy and restart the instruction. This will save us 663 * redundant copies when the currently switched task does not 664 * perform any I/O during its timeslice. 665 */ 666 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY; 667} 668 669/* 670 * switch_to(x,yn) should switch tasks from x to y. 
671 * 672 * We fsave/fwait so that an exception goes off at the right time 673 * (as a call from the fsave or fwait in effect) rather than to 674 * the wrong process. Lazy FP saving no longer makes any sense 675 * with modern CPU's, and this simplifies a lot of things (SMP 676 * and UP become the same). 677 * 678 * NOTE! We used to use the x86 hardware context switching. The 679 * reason for not using it any more becomes apparent when you 680 * try to recover gracefully from saved state that is no longer 681 * valid (stale segment register values in particular). With the 682 * hardware task-switch, there is no way to fix up bad state in 683 * a reasonable manner. 684 * 685 * The fact that Intel documents the hardware task-switching to 686 * be slow is a fairly red herring - this code is not noticeably 687 * faster. However, there _is_ some room for improvement here, 688 * so the performance issues may eventually be a valid point. 689 * More important, however, is the fact that this allows us much 690 * more flexibility. 691 * 692 * The return value (in %ax) will be the "prev" task after 693 * the task-switch, and shows up in ret_from_fork in entry.S, 694 * for example. 695 */ 696struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) 697{ 698 struct thread_struct *prev = &prev_p->thread, 699 *next = &next_p->thread; 700 int cpu = smp_processor_id(); 701 struct tss_struct *tss = &per_cpu(init_tss, cpu); 702 703 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ 704 705 __unlazy_fpu(prev_p); 706 707 708 /* we're going to use this soon, after a few expensive things */ 709 if (next_p->fpu_counter > 5) 710 prefetch(next->xstate); 711 712 /* 713 * Reload esp0. 714 */ 715 load_sp0(tss, next); 716 717 /* 718 * Save away %gs. No need to save %fs, as it was saved on the 719 * stack on entry. No need to save %es and %ds, as those are 720 * always kernel segments while inside the kernel. Doing this 721 * before setting the new TLS descriptors avoids the situation 722 * where we temporarily have non-reloadable segments in %fs 723 * and %gs. This could be an issue if the NMI handler ever 724 * used %fs or %gs (it does not today), or if the kernel is 725 * running inside of a hypervisor layer. 726 */ 727 savesegment(gs, prev->gs); 728 729 /* 730 * Load the per-thread Thread-Local Storage descriptor. 731 */ 732 load_TLS(next, cpu); 733 734 /* 735 * Restore IOPL if needed. In normal use, the flags restore 736 * in the switch assembly will handle this. But if the kernel 737 * is running virtualized at a non-zero CPL, the popf will 738 * not restore flags, so it must be done in a separate step. 739 */ 740 if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl)) 741 set_iopl_mask(next->iopl); 742 743 /* 744 * Now maybe handle debug registers and/or IO bitmaps 745 */ 746 if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV || 747 task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) 748 __switch_to_xtra(prev_p, next_p, tss); 749 750 /* 751 * Leave lazy mode, flushing any hypercalls made here. 752 * This must be done before restoring TLS segments so 753 * the GDT and LDT are properly updated, and must be 754 * done before math_state_restore, so the TS bit is up 755 * to date. 
756 */ 757 arch_leave_lazy_cpu_mode(); 758 759 /* If the task has used fpu the last 5 timeslices, just do a full 760 * restore of the math state immediately to avoid the trap; the 761 * chances of needing FPU soon are obviously high now 762 */ 763 if (next_p->fpu_counter > 5) 764 math_state_restore(); 765 766 /* 767 * Restore %gs if needed (which is common) 768 */ 769 if (prev->gs | next->gs) 770 loadsegment(gs, next->gs); 771 772 x86_write_percpu(current_task, next_p); 773 774 return prev_p; 775} 776 777asmlinkage int sys_fork(struct pt_regs regs) 778{ 779 return do_fork(SIGCHLD, regs.sp, ®s, 0, NULL, NULL); 780} 781 782asmlinkage int sys_clone(struct pt_regs regs) 783{ 784 unsigned long clone_flags; 785 unsigned long newsp; 786 int __user *parent_tidptr, *child_tidptr; 787 788 clone_flags = regs.bx; 789 newsp = regs.cx; 790 parent_tidptr = (int __user *)regs.dx; 791 child_tidptr = (int __user *)regs.di; 792 if (!newsp) 793 newsp = regs.sp; 794 return do_fork(clone_flags, newsp, ®s, 0, parent_tidptr, child_tidptr); 795} 796 797/* 798 * This is trivial, and on the face of it looks like it 799 * could equally well be done in user mode. 800 * 801 * Not so, for quite unobvious reasons - register pressure. 802 * In user mode vfork() cannot have a stack frame, and if 803 * done by calling the "clone()" system call directly, you 804 * do not have enough call-clobbered registers to hold all 805 * the information you need. 806 */ 807asmlinkage int sys_vfork(struct pt_regs regs) 808{ 809 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, ®s, 0, NULL, NULL); 810} 811 812/* 813 * sys_execve() executes a new program. 814 */ 815asmlinkage int sys_execve(struct pt_regs regs) 816{ 817 int error; 818 char * filename; 819 820 filename = getname((char __user *) regs.bx); 821 error = PTR_ERR(filename); 822 if (IS_ERR(filename)) 823 goto out; 824 error = do_execve(filename, 825 (char __user * __user *) regs.cx, 826 (char __user * __user *) regs.dx, 827 ®s); 828 if (error == 0) { 829 /* Make sure we don't return using sysenter.. */ 830 set_thread_flag(TIF_IRET); 831 } 832 putname(filename); 833out: 834 return error; 835} 836 837#define top_esp (THREAD_SIZE - sizeof(unsigned long)) 838#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long)) 839 840unsigned long get_wchan(struct task_struct *p) 841{ 842 unsigned long bp, sp, ip; 843 unsigned long stack_page; 844 int count = 0; 845 if (!p || p == current || p->state == TASK_RUNNING) 846 return 0; 847 stack_page = (unsigned long)task_stack_page(p); 848 sp = p->thread.sp; 849 if (!stack_page || sp < stack_page || sp > top_esp+stack_page) 850 return 0; 851 /* include/asm-i386/system.h:switch_to() pushes bp last. */ 852 bp = *(unsigned long *) sp; 853 do { 854 if (bp < stack_page || bp > top_ebp+stack_page) 855 return 0; 856 ip = *(unsigned long *) (bp+4); 857 if (!in_sched_functions(ip)) 858 return ip; 859 bp = *(unsigned long *) bp; 860 } while (count++ < 16); 861 return 0; 862} 863 864unsigned long arch_align_stack(unsigned long sp) 865{ 866 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) 867 sp -= get_random_int() % 8192; 868 return sp & ~0xf; 869} 870 871unsigned long arch_randomize_brk(struct mm_struct *mm) 872{ 873 unsigned long range_end = mm->brk + 0x02000000; 874 return randomize_range(mm->brk, range_end, 0) ? : mm->brk; 875} 876