process_32.c revision 92bc2056855b3250bf6fd5849f05f88d85839efa
/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#include <asm/vm86.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/kdebug.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

static int hlt_counter;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return ((unsigned long *)tsk->thread.sp)[3];
}

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

void disable_hlt(void)
{
	hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		local_irq_disable();
		if (!need_resched()) {
			ktime_t t0, t1;
			u64 t0n, t1n;

			t0 = ktime_get();
			t0n = ktime_to_ns(t0);
			safe_halt();	/* enables interrupts racelessly */
			local_irq_disable();
			t1 = ktime_get();
			t1n = ktime_to_ns(t1);
			sched_clock_idle_wakeup_event(t1n - t0n);
		}
		local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
	} else {
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
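/*
 * Illustrative note (not in the original file): code that must keep the
 * CPU out of HLT for a while can bracket the region with the exported
 * helpers above, which makes default_idle() fall back to cpu_relax():
 *
 *	disable_hlt();
 *	... work that cannot tolerate the CPU halting ...
 *	enable_hlt();
 *
 * The calls nest through hlt_counter, so every disable_hlt() needs a
 * matching enable_hlt().
 */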
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	cpu_relax();
}

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	/* This must be done before dead CPU ack */
	cpu_exit_clear();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {
			void (*idle)(void);

			check_pgt_cache();
			rmb();
			idle = pm_idle;

			if (rcu_pending(cpu))
				rcu_check_callbacks(cpu, 0);

			if (!idle)
				idle = default_idle;

			if (cpu_is_offline(cpu))
				play_dead();

			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			idle();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 0, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
	if (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(ax, cx);
	}
}

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	local_irq_enable();
	mwait_idle_with_hints(0, 0);
}

static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
	if (force_mwait)
		return 1;
	/* Any C1 states supported? */
	return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	static int selected;

	if (selected)
		return;
#ifdef CONFIG_X86_SMP
	if (pm_idle == poll_idle && smp_num_siblings > 1) {
		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
			" performance may degrade.\n");
	}
#endif
	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => all CPUs support mwait
		 */
		if (!pm_idle) {
			printk(KERN_INFO "using mwait in idle threads.\n");
			pm_idle = mwait_idle;
		}
	}
	selected = 1;
}

static int __init idle_setup(char *str)
{
	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	} else if (!strcmp(str, "mwait"))
		force_mwait = 1;
	else
		return -1;

	boot_option_idle_override = 1;
	return 0;
}
early_param("idle", idle_setup);
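/*
 * Usage sketch (assumed typical kernel command lines, not part of this
 * file): the "idle=" parameter handled by idle_setup() above is passed
 * at boot time, e.g.
 *
 *	idle=poll	- busy-wait in poll_idle() instead of halting
 *	idle=mwait	- make mwait_usable() report true, so MWAIT idle is
 *			  used on any CPU with the MWAIT feature
 *
 * Both accepted values set boot_option_idle_override; any other value
 * makes idle_setup() return -1 and the option is rejected.
 */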
void __show_registers(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;

	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
		savesegment(gs, gs);
	} else {
		sp = (unsigned long) (&regs->sp);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	printk("\n");
	printk("Pid: %d, comm: %s %s (%s %.*s)\n",
		task_pid_nr(current), current->comm,
		print_tainted(), init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
		(u16)regs->cs, regs->ip, regs->flags,
		smp_processor_id());
	print_symbol("EIP is at %s\n", regs->ip);

	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
		(u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
		cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
		d0, d1, d2, d3);

	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk("DR6: %08lx DR7: %08lx\n",
		d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	__show_registers(regs, 1);
	show_trace(NULL, regs, &regs->sp, regs->bp);
}

/*
 * This gets run with %bx containing the
 * function to call, and %dx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.bx = (unsigned long) fn;
	regs.dx = (unsigned long) arg;

	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
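/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * typical in-kernel user passes a function, an argument and any extra
 * clone flags; CLONE_VM | CLONE_UNTRACED are always added by
 * kernel_thread() itself:
 *
 *	static int my_worker(void *arg)
 *	{
 *		do_work(arg);		(hypothetical helper)
 *		return 0;
 *	}
 *
 *	int pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 *
 * The child starts in kernel_thread_helper(), which receives the
 * function in %ebx and its argument in %edx as set up in the fake
 * pt_regs above, and do_fork() returns the new task's pid (or a
 * negative error).
 */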
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* The process may have allocated an io port bitmap... nuke it. */
	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
		struct task_struct *tsk = current;
		struct thread_struct *t = &tsk->thread;
		int cpu = get_cpu();
		struct tss_struct *tss = &per_cpu(init_tss, cpu);

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
		t->io_bitmap_max = 0;
		tss->io_bitmap_owner = NULL;
		tss->io_bitmap_max = 0;
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		put_cpu();
	}
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	clear_tsk_thread_flag(tsk, TIF_DEBUG);
	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
	unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->ax = 0;
	childregs->sp = sp;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);

	p->thread.ip = (unsigned long) ret_from_fork;

	savesegment(gs, p->thread.gs);

	tsk = current;
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)childregs->si, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

#ifdef CONFIG_SECCOMP
static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}
void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}
static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}
#endif /* CONFIG_SECCOMP */

static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		 struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long debugctl;

	prev = &prev_p->thread;
	next = &next_p->thread;

	debugctl = prev->debugctlmsr;
	if (next->ds_area_msr != prev->ds_area_msr) {
		/* we clear debugctl to make sure DS
		 * is not in use when we change it */
		debugctl = 0;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
		wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
	}

	if (next->debugctlmsr != debugctl)
		wrmsr(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr, 0);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		set_debugreg(next->debugreg0, 0);
		set_debugreg(next->debugreg1, 1);
		set_debugreg(next->debugreg2, 2);
		set_debugreg(next->debugreg3, 3);
		/* no 4 and 5 */
		set_debugreg(next->debugreg6, 6);
		set_debugreg(next->debugreg7, 7);
	}

#ifdef CONFIG_SECCOMP
	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}
#endif

#ifdef X86_BTS
	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
#endif

	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Disable the bitmap via an invalid offset. We still cache
		 * the previous bitmap owner and the IO bitmap contents:
		 */
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		return;
	}

	if (likely(next == tss->io_bitmap_owner)) {
		/*
		 * Previous owner of the bitmap (hence the bitmap content)
		 * matches the next task, we don't have to do anything but
		 * to set a valid offset in the TSS:
		 */
		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
		return;
	}
	/*
	 * Lazy TSS's I/O bitmap copy. We set an invalid offset here
	 * and we let the task get a GPF in case an I/O instruction
	 * is performed. The handler of the GPF will verify that the
	 * faulting task has a valid I/O bitmap and, if true, does the
	 * real copy and restarts the instruction. This will save us
	 * redundant copies when the currently switched task does not
	 * perform any I/O during its timeslice.
	 */
	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	__unlazy_fpu(prev_p);

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(&next->i387.fxsave);

	/*
	 * Reload esp0.
	 */
	load_sp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry. No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel. Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs. This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	savesegment(gs, prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed. In normal use, the flags restore
	 * in the switch assembly will handle this. But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	if (next_p->fpu_counter > 5)
		math_state_restore();

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		loadsegment(gs, next->gs);

	x86_write_percpu(current_task, next_p);

	return prev_p;
}

asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.bx;
	newsp = regs.cx;
	parent_tidptr = (int __user *)regs.dx;
	child_tidptr = (int __user *)regs.di;
	if (!newsp)
		newsp = regs.sp;
	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) regs.bx);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			(char __user * __user *) regs.cx,
			(char __user * __user *) regs.dx,
			&regs);
	if (error == 0) {
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
	putname(filename);
out:
	return error;
}

#define top_esp		(THREAD_SIZE - sizeof(unsigned long))
#define top_ebp		(THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long bp, sp, ip;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	sp = p->thread.sp;
	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
		return 0;
	/* include/asm-i386/system.h:switch_to() pushes bp last. */
	bp = *(unsigned long *) sp;
	do {
		if (bp < stack_page || bp > top_ebp+stack_page)
			return 0;
		ip = *(unsigned long *) (bp+4);
		if (!in_sched_functions(ip))
			return ip;
		bp = *(unsigned long *) bp;
	} while (count++ < 16);
	return 0;
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}
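/*
 * Editorial note on the constants above (derived from the code, not new
 * behaviour): when randomization is enabled, arch_align_stack() lowers
 * the starting stack pointer by a random offset below 8 KiB
 * (get_random_int() % 8192) and then aligns it down to 16 bytes, while
 * arch_randomize_brk() picks a heap start in
 * [mm->brk, mm->brk + 0x02000000), i.e. within 32 MiB above the
 * unrandomized brk, falling back to mm->brk itself.
 */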