/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/elfcore.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/hardirq.h>
#include <linux/syscalls.h>
#include <linux/kernel.h>
#include <linux/tracehook.h>
#include <linux/signal.h>
#include <asm/stack.h>
#include <asm/switch_to.h>
#include <asm/homecache.h>
#include <asm/syscalls.h>
#include <asm/traps.h>
#include <asm/setup.h>
#ifdef CONFIG_HARDWALL
#include <asm/hardwall.h>
#endif
#include <arch/chip.h>
#include <arch/abi.h>
#include <arch/sim_def.h>

/*
 * Use the (x86) "idle=poll" option to prefer low latency when leaving the
 * idle loop over low power while in the idle loop, e.g. if we have
 * one thread per core and we want to get threads out of futex waits fast.
 */
static int no_idle_nap;
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads.\n");
		no_idle_nap = 1;
	} else if (!strcmp(str, "halt"))
		no_idle_nap = 0;
	else
		return -1;

	return 0;
}
early_param("idle", idle_setup);

/*
 * The idle thread.  There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e., sit in a loop waiting for
 * somebody to say that they'd like to reschedule).
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	if (no_idle_nap) {
		while (1) {
			while (!need_resched())
				cpu_relax();
			schedule();
		}
	}

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();
		while (!need_resched()) {
			if (cpu_is_offline(cpu))
				BUG();  /* no HOTPLUG_CPU */

			local_irq_disable();
			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			current_thread_info()->status &= ~TS_POLLING;
			/*
			 * TS_POLLING-cleared state must be visible before we
			 * test NEED_RESCHED:
			 */
			smp_mb();

			if (!need_resched())
				_cpu_idle();
			else
				local_irq_enable();
			current_thread_info()->status |= TS_POLLING;
		}
		rcu_idle_exit();
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

struct thread_info *alloc_thread_info_node(struct task_struct *task, int node)
{
	struct page *page;
	gfp_t flags = GFP_KERNEL;

#ifdef CONFIG_DEBUG_STACK_USAGE
	flags |= __GFP_ZERO;
#endif

	page = alloc_pages_node(node, flags, THREAD_SIZE_ORDER);
	if (!page)
		return NULL;

	return (struct thread_info *)page_address(page);
}
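
/*
 * Note on lifecycle: alloc_thread_info_node() and free_thread_info()
 * are not called from this file; they are hooks invoked by the generic
 * fork/exit code.  A rough sketch of the pairing, assuming the
 * dup_task_struct()/free_task() flow of this kernel generation (names
 * from kernel/fork.c, not from this file):
 *
 *	ti = alloc_thread_info_node(tsk, node);	// fork: stack + thread_info
 *	...task runs and eventually exits...
 *	free_thread_info(ti);			// task is already dead here
 */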

/*
 * Free a thread_info node, and all of its derivative
 * data structures.
 */
void free_thread_info(struct thread_info *info)
{
	struct single_step_state *step_state = info->step_state;

#ifdef CONFIG_HARDWALL
	/*
	 * We free a thread_info from the context of the task that has
	 * been scheduled next, so the original task is already dead.
	 * Calling deactivate here just frees up the data structures.
	 * If the task we're freeing held the last reference to a
	 * hardwall fd, it would have been released prior to this point
	 * anyway via exit_files(), and "hardwall" would be NULL by now.
	 */
	if (info->task->thread.hardwall)
		hardwall_deactivate(info->task);
#endif

	if (step_state) {

		/*
		 * FIXME: we don't munmap step_state->buffer
		 * because the mm_struct for this process (info->task->mm)
		 * has already been zeroed in exit_mm().  Keeping a
		 * reference to it here seems like a bad move, so this
		 * means we can't munmap() the buffer, and therefore if we
		 * ptrace multiple threads in a process, we will slowly
		 * leak user memory.  (Note that as soon as the last
		 * thread in a process dies, we will reclaim all user
		 * memory including single-step buffers in the usual way.)
		 * We should either assign a kernel VA to this buffer
		 * somehow, or we should associate the buffer(s) with the
		 * mm itself so we can clean them up that way.
		 */
		kfree(step_state);
	}

	free_pages((unsigned long)info, THREAD_SIZE_ORDER);
}

static void save_arch_state(struct thread_struct *t);

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long stack_size,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	unsigned long ksp;

	/*
	 * When creating a new kernel thread we pass sp as zero.
	 * Assign it to a reasonable value now that we have the stack.
	 */
	if (sp == 0 && regs->ex1 == PL_ICS_EX1(KERNEL_PL, 0))
		sp = KSTK_TOP(p);

	/*
	 * Do not clone step state from the parent; each thread
	 * must make its own lazily.
	 */
	task_thread_info(p)->step_state = NULL;

	/*
	 * Start new thread in ret_from_fork so it schedules properly
	 * and then return from interrupt like the parent.
	 */
	p->thread.pc = (unsigned long) ret_from_fork;

	/* Save user stack top pointer so we can ID the stack vm area later. */
	p->thread.usp0 = sp;

	/* Record the pid of the process that created this one. */
	p->thread.creator_pid = current->pid;

	/*
	 * Copy the registers onto the kernel stack so the
	 * return-from-interrupt code will reload it into registers.
	 */
	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->regs[0] = 0;	/* return value is zero */
	childregs->sp = sp;	/* override with new user stack pointer */

	/*
	 * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
	 * which is passed in as arg #5 to sys_clone().
	 */
	if (clone_flags & CLONE_SETTLS)
		childregs->tp = regs->regs[4];
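
	/*
	 * The code below builds the child's initial kernel stack.
	 * A sketch of the resulting layout, inferred from the "ksp"
	 * adjustments that follow (not part of the original commentary;
	 * higher addresses at the top):
	 *
	 *	+------------------------------+ <- top of kernel stack
	 *	| pt_regs (childregs)          |
	 *	+------------------------------+
	 *	| C ABI save area (zeroed)     |   interrupt-entry frame
	 *	+------------------------------+
	 *	| callee-saved registers       |   copied from *regs
	 *	+------------------------------+
	 *	| C ABI save area (zeroed)     |   __switch_to() frame
	 *	+------------------------------+ <- p->thread.ksp
	 */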

	/*
	 * Copy the callee-saved registers from the passed pt_regs struct
	 * into the context-switch callee-saved registers area.
	 * This way when we start the interrupt-return sequence, the
	 * callee-save registers will be correctly in registers, which
	 * is how we assume the compiler leaves them as we start doing
	 * the normal return-from-interrupt path after calling C code.
	 * Zero out the C ABI save area to mark the top of the stack.
	 */
	ksp = (unsigned long) childregs;
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* interrupt-entry save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
	memcpy((void *)ksp, &regs->regs[CALLEE_SAVED_FIRST_REG],
	       CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long));
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* __switch_to() save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	p->thread.ksp = ksp;

#if CHIP_HAS_TILE_DMA()
	/*
	 * No DMA in the new thread.  We model this on the fact that
	 * fork() clears the pending signals, alarms, and aio for the child.
	 */
	memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state));
	memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_SN_PROC()
	/* Likewise, the new thread is not running static processor code. */
	p->thread.sn_proc_running = 0;
	memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_PROC_STATUS_SPR()
	/* New thread has its miscellaneous processor state bits clear. */
	p->thread.proc_status = 0;
#endif

#ifdef CONFIG_HARDWALL
	/* New thread does not own any networks. */
	p->thread.hardwall = NULL;
#endif

	/*
	 * Start the new thread with the current architecture state
	 * (user interrupt masks, etc.).
	 */
	save_arch_state(&p->thread);

	return 0;
}

/*
 * Return "current" if it looks plausible, or else a pointer to a dummy.
 * This can be helpful if we are just trying to emit a clean panic.
 */
struct task_struct *validate_current(void)
{
	static struct task_struct corrupt = { .comm = "<corrupt>" };
	struct task_struct *tsk = current;
	if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
		     (high_memory && (void *)tsk > high_memory) ||
		     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
		pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
		tsk = &corrupt;
	}
	return tsk;
}

/* Take and return the pointer to the previous task, for schedule_tail(). */
struct task_struct *sim_notify_fork(struct task_struct *prev)
{
	struct task_struct *tsk = current;
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT |
		     (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS));
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK |
		     (tsk->pid << _SIM_CONTROL_OPERATOR_BITS));
	return prev;
}

int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *ptregs = task_pt_regs(tsk);
	elf_core_copy_regs(regs, ptregs);
	return 1;
}

#if CHIP_HAS_TILE_DMA()

/* Allow user processes to access the DMA SPRs */
void grant_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#else
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
#endif
}

/* Forbid user processes from accessing the DMA SPRs */
void restrict_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
#else
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#endif
}
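
/*
 * A note on the pattern above (inferred from the #if structure, not
 * from the original commentary): restrict_dma_mpls() pins the DMA
 * minimum protection levels at the kernel's own PL (CONFIG_KERNEL_PL,
 * i.e. 1 or 2), while grant_dma_mpls() drops them one level so the
 * user process can drive the DMA engine's SPRs directly.  The
 * context-switch code below picks one or the other depending on
 * whether the incoming task has DMA enabled.
 */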

/* Pause the DMA engine, then save off its state registers. */
static void save_tile_dma_state(struct tile_dma_state *dma)
{
	unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS);
	unsigned long post_suspend_state;

	/* If we're running, suspend the engine. */
	if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK)
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);

	/*
	 * Wait for the engine to idle, then save regs.  Note that we
	 * want to record the "running" bit from before suspension,
	 * and the "done" bit from after, so that we can properly
	 * distinguish a case where the user suspended the engine from
	 * the case where the kernel suspended as part of the context
	 * swap.
	 */
	do {
		post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS);
	} while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK);

	dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR);
	dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR);
	dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR);
	dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR);
	dma->strides = __insn_mfspr(SPR_DMA_STRIDE);
	dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE);
	dma->byte = __insn_mfspr(SPR_DMA_BYTE);
	dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) |
		(post_suspend_state & SPR_DMA_STATUS__DONE_MASK);
}

/* Restart a DMA that was running before we were context-switched out. */
static void restore_tile_dma_state(struct thread_struct *t)
{
	const struct tile_dma_state *dma = &t->tile_dma_state;

	/*
	 * The only way to restore the done bit is to run a zero
	 * length transaction.
	 */
	if ((dma->status & SPR_DMA_STATUS__DONE_MASK) &&
	    !(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) {
		__insn_mtspr(SPR_DMA_BYTE, 0);
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}

	__insn_mtspr(SPR_DMA_SRC_ADDR, dma->src);
	__insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk);
	__insn_mtspr(SPR_DMA_DST_ADDR, dma->dest);
	__insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk);
	__insn_mtspr(SPR_DMA_STRIDE, dma->strides);
	__insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size);
	__insn_mtspr(SPR_DMA_BYTE, dma->byte);

	/*
	 * Restart the engine if we were running and not done.
	 * Clear a pending async DMA fault that we were waiting on return
	 * to user space to execute, since we expect the DMA engine
	 * to regenerate those faults for us now.  Note that we don't
	 * try to clear the TIF_ASYNC_TLB flag, since it's relatively
	 * harmless if set, and it covers both DMA and the SN processor.
	 */
	if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) {
		t->dma_async_tlb.fault_num = 0;
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
	}
}

#endif
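
/*
 * save_arch_state() and restore_arch_state() below are deliberately
 * symmetric: every SPR captured by the former under a given
 * CHIP_HAS_*() conditional is replayed by the latter under the same
 * conditional.  Any new per-thread SPR should be added to both, or
 * that state will silently fail to survive a context switch.
 */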
407 */ 408 if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) { 409 t->dma_async_tlb.fault_num = 0; 410 __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK); 411 } 412} 413 414#endif 415 416static void save_arch_state(struct thread_struct *t) 417{ 418#if CHIP_HAS_SPLIT_INTR_MASK() 419 t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) | 420 ((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32); 421#else 422 t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0); 423#endif 424 t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0); 425 t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1); 426 t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0); 427 t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1); 428 t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2); 429 t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3); 430 t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS); 431#if CHIP_HAS_PROC_STATUS_SPR() 432 t->proc_status = __insn_mfspr(SPR_PROC_STATUS); 433#endif 434#if !CHIP_HAS_FIXED_INTVEC_BASE() 435 t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0); 436#endif 437#if CHIP_HAS_TILE_RTF_HWM() 438 t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM); 439#endif 440#if CHIP_HAS_DSTREAM_PF() 441 t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF); 442#endif 443} 444 445static void restore_arch_state(const struct thread_struct *t) 446{ 447#if CHIP_HAS_SPLIT_INTR_MASK() 448 __insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask); 449 __insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32); 450#else 451 __insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask); 452#endif 453 __insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]); 454 __insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]); 455 __insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]); 456 __insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]); 457 __insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]); 458 __insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]); 459 __insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0); 460#if CHIP_HAS_PROC_STATUS_SPR() 461 __insn_mtspr(SPR_PROC_STATUS, t->proc_status); 462#endif 463#if !CHIP_HAS_FIXED_INTVEC_BASE() 464 __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base); 465#endif 466#if CHIP_HAS_TILE_RTF_HWM() 467 __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm); 468#endif 469#if CHIP_HAS_DSTREAM_PF() 470 __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf); 471#endif 472} 473 474 475void _prepare_arch_switch(struct task_struct *next) 476{ 477#if CHIP_HAS_SN_PROC() 478 int snctl; 479#endif 480#if CHIP_HAS_TILE_DMA() 481 struct tile_dma_state *dma = ¤t->thread.tile_dma_state; 482 if (dma->enabled) 483 save_tile_dma_state(dma); 484#endif 485#if CHIP_HAS_SN_PROC() 486 /* 487 * Suspend the static network processor if it was running. 488 * We do not suspend the fabric itself, just like we don't 489 * try to suspend the UDN. 490 */ 491 snctl = __insn_mfspr(SPR_SNCTL); 492 current->thread.sn_proc_running = 493 (snctl & SPR_SNCTL__FRZPROC_MASK) == 0; 494 if (current->thread.sn_proc_running) 495 __insn_mtspr(SPR_SNCTL, snctl | SPR_SNCTL__FRZPROC_MASK); 496#endif 497} 498 499 500struct task_struct *__sched _switch_to(struct task_struct *prev, 501 struct task_struct *next) 502{ 503 /* DMA state is already saved; save off other arch state. */ 504 save_arch_state(&prev->thread); 505 506#if CHIP_HAS_TILE_DMA() 507 /* 508 * Restore DMA in new task if desired. 

struct task_struct *__sched _switch_to(struct task_struct *prev,
				       struct task_struct *next)
{
	/* DMA state is already saved; save off other arch state. */
	save_arch_state(&prev->thread);

#if CHIP_HAS_TILE_DMA()
	/*
	 * Restore DMA in new task if desired.
	 * Note that it is only safe to restart here since interrupts
	 * are disabled, so we can't take any DMATLB miss or access
	 * interrupts before we have finished switching stacks.
	 */
	if (next->thread.tile_dma_state.enabled) {
		restore_tile_dma_state(&next->thread);
		grant_dma_mpls();
	} else {
		restrict_dma_mpls();
	}
#endif

	/* Restore other arch state. */
	restore_arch_state(&next->thread);

#if CHIP_HAS_SN_PROC()
	/*
	 * Restart static network processor in the new process
	 * if it was running before.
	 */
	if (next->thread.sn_proc_running) {
		int snctl = __insn_mfspr(SPR_SNCTL);
		__insn_mtspr(SPR_SNCTL, snctl & ~SPR_SNCTL__FRZPROC_MASK);
	}
#endif

#ifdef CONFIG_HARDWALL
	/* Enable or disable access to the network registers appropriately. */
	if (prev->thread.hardwall != NULL) {
		if (next->thread.hardwall == NULL)
			restrict_network_mpls();
	} else if (next->thread.hardwall != NULL) {
		grant_network_mpls();
	}
#endif

	/*
	 * Switch kernel SP, PC, and callee-saved registers.
	 * In the context of the new task, return the old task pointer
	 * (i.e. the task that actually called __switch_to).
	 * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
	 */
	return __switch_to(prev, next, next_current_ksp0(next));
}

/*
 * This routine is called on return from interrupt if any of the
 * TIF_WORK_MASK flags are set in thread_info->flags.  It is
 * entered with interrupts disabled so we don't miss an event
 * that modified the thread_info flags.  If any flag is set, we
 * handle it and return, and the calling assembly code will
 * re-disable interrupts, reload the thread flags, and call back
 * if more flags need to be handled.
 *
 * We return whether we need to check the thread_info flags again
 * or not.  Note that we don't clear TIF_SINGLESTEP here, so it's
 * important that it be tested last, and then claim that we don't
 * need to recheck the flags.
 */
int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
{
	/* If we enter in kernel mode, do nothing and exit the caller loop. */
	if (!user_mode(regs))
		return 0;

	if (thread_info_flags & _TIF_NEED_RESCHED) {
		schedule();
		return 1;
	}
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	if (thread_info_flags & _TIF_ASYNC_TLB) {
		do_async_page_fault(regs);
		return 1;
	}
#endif
	if (thread_info_flags & _TIF_SIGPENDING) {
		do_signal(regs);
		return 1;
	}
	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		if (current->replacement_session_keyring)
			key_replace_session_keyring();
		return 1;
	}
	if (thread_info_flags & _TIF_SINGLESTEP) {
		single_step_once(regs);
		return 0;
	}
	panic("work_pending: bad flags %#x\n", thread_info_flags);
}

/* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		void __user *, parent_tidptr, void __user *, child_tidptr,
		struct pt_regs *, regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0,
		       parent_tidptr, child_tidptr);
}
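
/*
 * A hypothetical illustration of the TLS convention noted above (the
 * wrapper and argument order are illustrative, not from this file):
 * a userspace clone() call on this architecture would pass the TLS
 * pointer as the fifth argument, landing in r4, where copy_thread()
 * installs it into the child's "tp" register when CLONE_SETTLS is set:
 *
 *	clone(flags | CLONE_SETTLS, child_stack, &ptid, &ctid, tls);
 */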
615 */ 616SYSCALL_DEFINE4(execve, const char __user *, path, 617 const char __user *const __user *, argv, 618 const char __user *const __user *, envp, 619 struct pt_regs *, regs) 620{ 621 long error; 622 char *filename; 623 624 filename = getname(path); 625 error = PTR_ERR(filename); 626 if (IS_ERR(filename)) 627 goto out; 628 error = do_execve(filename, argv, envp, regs); 629 putname(filename); 630 if (error == 0) 631 single_step_execve(); 632out: 633 return error; 634} 635 636#ifdef CONFIG_COMPAT 637long compat_sys_execve(const char __user *path, 638 compat_uptr_t __user *argv, 639 compat_uptr_t __user *envp, 640 struct pt_regs *regs) 641{ 642 long error; 643 char *filename; 644 645 filename = getname(path); 646 error = PTR_ERR(filename); 647 if (IS_ERR(filename)) 648 goto out; 649 error = compat_do_execve(filename, argv, envp, regs); 650 putname(filename); 651 if (error == 0) 652 single_step_execve(); 653out: 654 return error; 655} 656#endif 657 658unsigned long get_wchan(struct task_struct *p) 659{ 660 struct KBacktraceIterator kbt; 661 662 if (!p || p == current || p->state == TASK_RUNNING) 663 return 0; 664 665 for (KBacktraceIterator_init(&kbt, p, NULL); 666 !KBacktraceIterator_end(&kbt); 667 KBacktraceIterator_next(&kbt)) { 668 if (!in_sched_functions(kbt.it.pc)) 669 return kbt.it.pc; 670 } 671 672 return 0; 673} 674 675/* 676 * We pass in lr as zero (cleared in kernel_thread) and the caller 677 * part of the backtrace ABI on the stack also zeroed (in copy_thread) 678 * so that backtraces will stop with this function. 679 * Note that we don't use r0, since copy_thread() clears it. 680 */ 681static void start_kernel_thread(int dummy, int (*fn)(int), int arg) 682{ 683 do_exit(fn(arg)); 684} 685 686/* 687 * Create a kernel thread 688 */ 689int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) 690{ 691 struct pt_regs regs; 692 693 memset(®s, 0, sizeof(regs)); 694 regs.ex1 = PL_ICS_EX1(KERNEL_PL, 0); /* run at kernel PL, no ICS */ 695 regs.pc = (long) start_kernel_thread; 696 regs.flags = PT_FLAGS_CALLER_SAVES; /* need to restore r1 and r2 */ 697 regs.regs[1] = (long) fn; /* function pointer */ 698 regs.regs[2] = (long) arg; /* parameter register */ 699 700 /* Ok, create the new process.. */ 701 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, 702 0, NULL, NULL); 703} 704EXPORT_SYMBOL(kernel_thread); 705 706/* Flush thread state. */ 707void flush_thread(void) 708{ 709 /* Nothing */ 710} 711 712/* 713 * Free current thread data structures etc.. 
714 */ 715void exit_thread(void) 716{ 717 /* Nothing */ 718} 719 720void show_regs(struct pt_regs *regs) 721{ 722 struct task_struct *tsk = validate_current(); 723 int i; 724 725 pr_err("\n"); 726 pr_err(" Pid: %d, comm: %20s, CPU: %d\n", 727 tsk->pid, tsk->comm, smp_processor_id()); 728#ifdef __tilegx__ 729 for (i = 0; i < 51; i += 3) 730 pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n", 731 i, regs->regs[i], i+1, regs->regs[i+1], 732 i+2, regs->regs[i+2]); 733 pr_err(" r51: "REGFMT" r52: "REGFMT" tp : "REGFMT"\n", 734 regs->regs[51], regs->regs[52], regs->tp); 735 pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr); 736#else 737 for (i = 0; i < 52; i += 4) 738 pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT 739 " r%-2d: "REGFMT" r%-2d: "REGFMT"\n", 740 i, regs->regs[i], i+1, regs->regs[i+1], 741 i+2, regs->regs[i+2], i+3, regs->regs[i+3]); 742 pr_err(" r52: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n", 743 regs->regs[52], regs->tp, regs->sp, regs->lr); 744#endif 745 pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld\n", 746 regs->pc, regs->ex1, regs->faultnum); 747 748 dump_stack_regs(regs); 749} 750