sched.h revision f201ae2356c74bcae130b2177b3dca903ea98071
1#ifndef _LINUX_SCHED_H 2#define _LINUX_SCHED_H 3 4/* 5 * cloning flags: 6 */ 7#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */ 8#define CLONE_VM 0x00000100 /* set if VM shared between processes */ 9#define CLONE_FS 0x00000200 /* set if fs info shared between processes */ 10#define CLONE_FILES 0x00000400 /* set if open files shared between processes */ 11#define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked signals shared */ 12#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */ 13#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */ 14#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */ 15#define CLONE_THREAD 0x00010000 /* Same thread group? */ 16#define CLONE_NEWNS 0x00020000 /* New namespace group? */ 17#define CLONE_SYSVSEM 0x00040000 /* share system V SEM_UNDO semantics */ 18#define CLONE_SETTLS 0x00080000 /* create a new TLS for the child */ 19#define CLONE_PARENT_SETTID 0x00100000 /* set the TID in the parent */ 20#define CLONE_CHILD_CLEARTID 0x00200000 /* clear the TID in the child */ 21#define CLONE_DETACHED 0x00400000 /* Unused, ignored */ 22#define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */ 23#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */ 24#define CLONE_STOPPED 0x02000000 /* Start in stopped state */ 25#define CLONE_NEWUTS 0x04000000 /* New utsname group? */ 26#define CLONE_NEWIPC 0x08000000 /* New ipcs */ 27#define CLONE_NEWUSER 0x10000000 /* New user namespace */ 28#define CLONE_NEWPID 0x20000000 /* New pid namespace */ 29#define CLONE_NEWNET 0x40000000 /* New network namespace */ 30#define CLONE_IO 0x80000000 /* Clone io context */ 31 32/* 33 * Scheduling policies 34 */ 35#define SCHED_NORMAL 0 36#define SCHED_FIFO 1 37#define SCHED_RR 2 38#define SCHED_BATCH 3 39/* SCHED_ISO: reserved but not implemented yet */ 40#define SCHED_IDLE 5 41 42#ifdef __KERNEL__ 43 44struct sched_param { 45 int sched_priority; 46}; 47 48#include <asm/param.h> /* for HZ */ 49 50#include <linux/capability.h> 51#include <linux/threads.h> 52#include <linux/kernel.h> 53#include <linux/types.h> 54#include <linux/timex.h> 55#include <linux/jiffies.h> 56#include <linux/rbtree.h> 57#include <linux/thread_info.h> 58#include <linux/cpumask.h> 59#include <linux/errno.h> 60#include <linux/nodemask.h> 61#include <linux/mm_types.h> 62 63#include <asm/system.h> 64#include <asm/page.h> 65#include <asm/ptrace.h> 66#include <asm/cputime.h> 67 68#include <linux/smp.h> 69#include <linux/sem.h> 70#include <linux/signal.h> 71#include <linux/fs_struct.h> 72#include <linux/compiler.h> 73#include <linux/completion.h> 74#include <linux/pid.h> 75#include <linux/percpu.h> 76#include <linux/topology.h> 77#include <linux/proportions.h> 78#include <linux/seccomp.h> 79#include <linux/rcupdate.h> 80#include <linux/rtmutex.h> 81 82#include <linux/time.h> 83#include <linux/param.h> 84#include <linux/resource.h> 85#include <linux/timer.h> 86#include <linux/hrtimer.h> 87#include <linux/task_io_accounting.h> 88#include <linux/kobject.h> 89#include <linux/latencytop.h> 90#include <linux/cred.h> 91 92#include <asm/processor.h> 93 94struct mem_cgroup; 95struct exec_domain; 96struct futex_pi_state; 97struct robust_list_head; 98struct bio; 99 100/* 101 * List of flags we want to share for kernel threads, 102 * if only because they are not used by them anyway. 103 */ 104#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND) 105 106/* 107 * These are the constant used to fake the fixed-point load-average 108 * counting. Some notes: 109 * - 11 bit fractions expand to 22 bits by the multiplies: this gives 110 * a load-average precision of 10 bits integer + 11 bits fractional 111 * - if you want to count load-averages more often, you need more 112 * precision, or rounding will get you. With 2-second counting freq, 113 * the EXP_n values would be 1981, 2034 and 2043 if still using only 114 * 11 bit fractions. 115 */ 116extern unsigned long avenrun[]; /* Load averages */ 117 118#define FSHIFT 11 /* nr of bits of precision */ 119#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ 120#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */ 121#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */ 122#define EXP_5 2014 /* 1/exp(5sec/5min) */ 123#define EXP_15 2037 /* 1/exp(5sec/15min) */ 124 125#define CALC_LOAD(load,exp,n) \ 126 load *= exp; \ 127 load += n*(FIXED_1-exp); \ 128 load >>= FSHIFT; 129 130extern unsigned long total_forks; 131extern int nr_threads; 132DECLARE_PER_CPU(unsigned long, process_counts); 133extern int nr_processes(void); 134extern unsigned long nr_running(void); 135extern unsigned long nr_uninterruptible(void); 136extern unsigned long nr_active(void); 137extern unsigned long nr_iowait(void); 138 139struct seq_file; 140struct cfs_rq; 141struct task_group; 142#ifdef CONFIG_SCHED_DEBUG 143extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); 144extern void proc_sched_set_task(struct task_struct *p); 145extern void 146print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); 147#else 148static inline void 149proc_sched_show_task(struct task_struct *p, struct seq_file *m) 150{ 151} 152static inline void proc_sched_set_task(struct task_struct *p) 153{ 154} 155static inline void 156print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) 157{ 158} 159#endif 160 161extern unsigned long long time_sync_thresh; 162 163/* 164 * Task state bitmask. NOTE! These bits are also 165 * encoded in fs/proc/array.c: get_task_state(). 166 * 167 * We have two separate sets of flags: task->state 168 * is about runnability, while task->exit_state are 169 * about the task exiting. Confusing, but this way 170 * modifying one set can't modify the other one by 171 * mistake. 172 */ 173#define TASK_RUNNING 0 174#define TASK_INTERRUPTIBLE 1 175#define TASK_UNINTERRUPTIBLE 2 176#define __TASK_STOPPED 4 177#define __TASK_TRACED 8 178/* in tsk->exit_state */ 179#define EXIT_ZOMBIE 16 180#define EXIT_DEAD 32 181/* in tsk->state again */ 182#define TASK_DEAD 64 183#define TASK_WAKEKILL 128 184 185/* Convenience macros for the sake of set_task_state */ 186#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) 187#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) 188#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) 189 190/* Convenience macros for the sake of wake_up */ 191#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) 192#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) 193 194/* get_task_state() */ 195#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ 196 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ 197 __TASK_TRACED) 198 199#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) 200#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) 201#define task_is_stopped_or_traced(task) \ 202 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) 203#define task_contributes_to_load(task) \ 204 ((task->state & TASK_UNINTERRUPTIBLE) != 0) 205 206#define __set_task_state(tsk, state_value) \ 207 do { (tsk)->state = (state_value); } while (0) 208#define set_task_state(tsk, state_value) \ 209 set_mb((tsk)->state, (state_value)) 210 211/* 212 * set_current_state() includes a barrier so that the write of current->state 213 * is correctly serialised wrt the caller's subsequent test of whether to 214 * actually sleep: 215 * 216 * set_current_state(TASK_UNINTERRUPTIBLE); 217 * if (do_i_need_to_sleep()) 218 * schedule(); 219 * 220 * If the caller does not need such serialisation then use __set_current_state() 221 */ 222#define __set_current_state(state_value) \ 223 do { current->state = (state_value); } while (0) 224#define set_current_state(state_value) \ 225 set_mb(current->state, (state_value)) 226 227/* Task command name length */ 228#define TASK_COMM_LEN 16 229 230#include <linux/spinlock.h> 231 232/* 233 * This serializes "schedule()" and also protects 234 * the run-queue from deletions/modifications (but 235 * _adding_ to the beginning of the run-queue has 236 * a separate lock). 237 */ 238extern rwlock_t tasklist_lock; 239extern spinlock_t mmlist_lock; 240 241struct task_struct; 242 243extern void sched_init(void); 244extern void sched_init_smp(void); 245extern asmlinkage void schedule_tail(struct task_struct *prev); 246extern void init_idle(struct task_struct *idle, int cpu); 247extern void init_idle_bootup_task(struct task_struct *idle); 248 249extern int runqueue_is_locked(void); 250extern void task_rq_unlock_wait(struct task_struct *p); 251 252extern cpumask_t nohz_cpu_mask; 253#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) 254extern int select_nohz_load_balancer(int cpu); 255#else 256static inline int select_nohz_load_balancer(int cpu) 257{ 258 return 0; 259} 260#endif 261 262extern unsigned long rt_needs_cpu(int cpu); 263 264/* 265 * Only dump TASK_* tasks. (0 for all tasks) 266 */ 267extern void show_state_filter(unsigned long state_filter); 268 269static inline void show_state(void) 270{ 271 show_state_filter(0); 272} 273 274extern void show_regs(struct pt_regs *); 275 276/* 277 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current 278 * task), SP is the stack pointer of the first frame that should be shown in the back 279 * trace (or NULL if the entire call-chain of the task should be shown). 280 */ 281extern void show_stack(struct task_struct *task, unsigned long *sp); 282 283void io_schedule(void); 284long io_schedule_timeout(long timeout); 285 286extern void cpu_init (void); 287extern void trap_init(void); 288extern void account_process_tick(struct task_struct *task, int user); 289extern void update_process_times(int user); 290extern void scheduler_tick(void); 291 292extern void sched_show_task(struct task_struct *p); 293 294#ifdef CONFIG_DETECT_SOFTLOCKUP 295extern void softlockup_tick(void); 296extern void touch_softlockup_watchdog(void); 297extern void touch_all_softlockup_watchdogs(void); 298extern unsigned int softlockup_panic; 299extern unsigned long sysctl_hung_task_check_count; 300extern unsigned long sysctl_hung_task_timeout_secs; 301extern unsigned long sysctl_hung_task_warnings; 302extern int softlockup_thresh; 303#else 304static inline void softlockup_tick(void) 305{ 306} 307static inline void spawn_softlockup_task(void) 308{ 309} 310static inline void touch_softlockup_watchdog(void) 311{ 312} 313static inline void touch_all_softlockup_watchdogs(void) 314{ 315} 316#endif 317 318 319/* Attach to any functions which should be ignored in wchan output. */ 320#define __sched __attribute__((__section__(".sched.text"))) 321 322/* Linker adds these: start and end of __sched functions */ 323extern char __sched_text_start[], __sched_text_end[]; 324 325/* Is this address in the __sched functions? */ 326extern int in_sched_functions(unsigned long addr); 327 328#define MAX_SCHEDULE_TIMEOUT LONG_MAX 329extern signed long schedule_timeout(signed long timeout); 330extern signed long schedule_timeout_interruptible(signed long timeout); 331extern signed long schedule_timeout_killable(signed long timeout); 332extern signed long schedule_timeout_uninterruptible(signed long timeout); 333asmlinkage void schedule(void); 334 335struct nsproxy; 336struct user_namespace; 337 338/* Maximum number of active map areas.. This is a random (large) number */ 339#define DEFAULT_MAX_MAP_COUNT 65536 340 341extern int sysctl_max_map_count; 342 343#include <linux/aio.h> 344 345extern unsigned long 346arch_get_unmapped_area(struct file *, unsigned long, unsigned long, 347 unsigned long, unsigned long); 348extern unsigned long 349arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, 350 unsigned long len, unsigned long pgoff, 351 unsigned long flags); 352extern void arch_unmap_area(struct mm_struct *, unsigned long); 353extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); 354 355#if USE_SPLIT_PTLOCKS 356/* 357 * The mm counters are not protected by its page_table_lock, 358 * so must be incremented atomically. 359 */ 360#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value) 361#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member)) 362#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member) 363#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member) 364#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member) 365 366#else /* !USE_SPLIT_PTLOCKS */ 367/* 368 * The mm counters are protected by its page_table_lock, 369 * so can be incremented directly. 370 */ 371#define set_mm_counter(mm, member, value) (mm)->_##member = (value) 372#define get_mm_counter(mm, member) ((mm)->_##member) 373#define add_mm_counter(mm, member, value) (mm)->_##member += (value) 374#define inc_mm_counter(mm, member) (mm)->_##member++ 375#define dec_mm_counter(mm, member) (mm)->_##member-- 376 377#endif /* !USE_SPLIT_PTLOCKS */ 378 379#define get_mm_rss(mm) \ 380 (get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss)) 381#define update_hiwater_rss(mm) do { \ 382 unsigned long _rss = get_mm_rss(mm); \ 383 if ((mm)->hiwater_rss < _rss) \ 384 (mm)->hiwater_rss = _rss; \ 385} while (0) 386#define update_hiwater_vm(mm) do { \ 387 if ((mm)->hiwater_vm < (mm)->total_vm) \ 388 (mm)->hiwater_vm = (mm)->total_vm; \ 389} while (0) 390 391extern void set_dumpable(struct mm_struct *mm, int value); 392extern int get_dumpable(struct mm_struct *mm); 393 394/* mm flags */ 395/* dumpable bits */ 396#define MMF_DUMPABLE 0 /* core dump is permitted */ 397#define MMF_DUMP_SECURELY 1 /* core file is readable only by root */ 398#define MMF_DUMPABLE_BITS 2 399 400/* coredump filter bits */ 401#define MMF_DUMP_ANON_PRIVATE 2 402#define MMF_DUMP_ANON_SHARED 3 403#define MMF_DUMP_MAPPED_PRIVATE 4 404#define MMF_DUMP_MAPPED_SHARED 5 405#define MMF_DUMP_ELF_HEADERS 6 406#define MMF_DUMP_HUGETLB_PRIVATE 7 407#define MMF_DUMP_HUGETLB_SHARED 8 408#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS 409#define MMF_DUMP_FILTER_BITS 7 410#define MMF_DUMP_FILTER_MASK \ 411 (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) 412#define MMF_DUMP_FILTER_DEFAULT \ 413 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ 414 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF) 415 416#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS 417# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) 418#else 419# define MMF_DUMP_MASK_DEFAULT_ELF 0 420#endif 421 422struct sighand_struct { 423 atomic_t count; 424 struct k_sigaction action[_NSIG]; 425 spinlock_t siglock; 426 wait_queue_head_t signalfd_wqh; 427}; 428 429struct pacct_struct { 430 int ac_flag; 431 long ac_exitcode; 432 unsigned long ac_mem; 433 cputime_t ac_utime, ac_stime; 434 unsigned long ac_minflt, ac_majflt; 435}; 436 437/** 438 * struct task_cputime - collected CPU time counts 439 * @utime: time spent in user mode, in &cputime_t units 440 * @stime: time spent in kernel mode, in &cputime_t units 441 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds 442 * 443 * This structure groups together three kinds of CPU time that are 444 * tracked for threads and thread groups. Most things considering 445 * CPU time want to group these counts together and treat all three 446 * of them in parallel. 447 */ 448struct task_cputime { 449 cputime_t utime; 450 cputime_t stime; 451 unsigned long long sum_exec_runtime; 452}; 453/* Alternate field names when used to cache expirations. */ 454#define prof_exp stime 455#define virt_exp utime 456#define sched_exp sum_exec_runtime 457 458/** 459 * struct thread_group_cputime - thread group interval timer counts 460 * @totals: thread group interval timers; substructure for 461 * uniprocessor kernel, per-cpu for SMP kernel. 462 * 463 * This structure contains the version of task_cputime, above, that is 464 * used for thread group CPU clock calculations. 465 */ 466struct thread_group_cputime { 467 struct task_cputime *totals; 468}; 469 470/* 471 * NOTE! "signal_struct" does not have it's own 472 * locking, because a shared signal_struct always 473 * implies a shared sighand_struct, so locking 474 * sighand_struct is always a proper superset of 475 * the locking of signal_struct. 476 */ 477struct signal_struct { 478 atomic_t count; 479 atomic_t live; 480 481 wait_queue_head_t wait_chldexit; /* for wait4() */ 482 483 /* current thread group signal load-balancing target: */ 484 struct task_struct *curr_target; 485 486 /* shared signal handling: */ 487 struct sigpending shared_pending; 488 489 /* thread group exit support */ 490 int group_exit_code; 491 /* overloaded: 492 * - notify group_exit_task when ->count is equal to notify_count 493 * - everyone except group_exit_task is stopped during signal delivery 494 * of fatal signals, group_exit_task processes the signal. 495 */ 496 int notify_count; 497 struct task_struct *group_exit_task; 498 499 /* thread group stop support, overloads group_exit_code too */ 500 int group_stop_count; 501 unsigned int flags; /* see SIGNAL_* flags below */ 502 503 /* POSIX.1b Interval Timers */ 504 struct list_head posix_timers; 505 506 /* ITIMER_REAL timer for the process */ 507 struct hrtimer real_timer; 508 struct pid *leader_pid; 509 ktime_t it_real_incr; 510 511 /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */ 512 cputime_t it_prof_expires, it_virt_expires; 513 cputime_t it_prof_incr, it_virt_incr; 514 515 /* 516 * Thread group totals for process CPU clocks. 517 * See thread_group_cputime(), et al, for details. 518 */ 519 struct thread_group_cputime cputime; 520 521 /* Earliest-expiration cache. */ 522 struct task_cputime cputime_expires; 523 524 struct list_head cpu_timers[3]; 525 526 /* job control IDs */ 527 528 /* 529 * pgrp and session fields are deprecated. 530 * use the task_session_Xnr and task_pgrp_Xnr routines below 531 */ 532 533 union { 534 pid_t pgrp __deprecated; 535 pid_t __pgrp; 536 }; 537 538 struct pid *tty_old_pgrp; 539 540 union { 541 pid_t session __deprecated; 542 pid_t __session; 543 }; 544 545 /* boolean value for session group leader */ 546 int leader; 547 548 struct tty_struct *tty; /* NULL if no tty */ 549 550 /* 551 * Cumulative resource counters for dead threads in the group, 552 * and for reaped dead child processes forked by this group. 553 * Live threads maintain their own counters and add to these 554 * in __exit_signal, except for the group leader. 555 */ 556 cputime_t cutime, cstime; 557 cputime_t gtime; 558 cputime_t cgtime; 559 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; 560 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; 561 unsigned long inblock, oublock, cinblock, coublock; 562 struct task_io_accounting ioac; 563 564 /* 565 * We don't bother to synchronize most readers of this at all, 566 * because there is no reader checking a limit that actually needs 567 * to get both rlim_cur and rlim_max atomically, and either one 568 * alone is a single word that can safely be read normally. 569 * getrlimit/setrlimit use task_lock(current->group_leader) to 570 * protect this instead of the siglock, because they really 571 * have no need to disable irqs. 572 */ 573 struct rlimit rlim[RLIM_NLIMITS]; 574 575 /* keep the process-shared keyrings here so that they do the right 576 * thing in threads created with CLONE_THREAD */ 577#ifdef CONFIG_KEYS 578 struct key *session_keyring; /* keyring inherited over fork */ 579 struct key *process_keyring; /* keyring private to this process */ 580#endif 581#ifdef CONFIG_BSD_PROCESS_ACCT 582 struct pacct_struct pacct; /* per-process accounting information */ 583#endif 584#ifdef CONFIG_TASKSTATS 585 struct taskstats *stats; 586#endif 587#ifdef CONFIG_AUDIT 588 unsigned audit_tty; 589 struct tty_audit_buf *tty_audit_buf; 590#endif 591}; 592 593/* Context switch must be unlocked if interrupts are to be enabled */ 594#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 595# define __ARCH_WANT_UNLOCKED_CTXSW 596#endif 597 598/* 599 * Bits in flags field of signal_struct. 600 */ 601#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ 602#define SIGNAL_STOP_DEQUEUED 0x00000002 /* stop signal dequeued */ 603#define SIGNAL_STOP_CONTINUED 0x00000004 /* SIGCONT since WCONTINUED reap */ 604#define SIGNAL_GROUP_EXIT 0x00000008 /* group exit in progress */ 605/* 606 * Pending notifications to parent. 607 */ 608#define SIGNAL_CLD_STOPPED 0x00000010 609#define SIGNAL_CLD_CONTINUED 0x00000020 610#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) 611 612#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ 613 614/* If true, all threads except ->group_exit_task have pending SIGKILL */ 615static inline int signal_group_exit(const struct signal_struct *sig) 616{ 617 return (sig->flags & SIGNAL_GROUP_EXIT) || 618 (sig->group_exit_task != NULL); 619} 620 621/* 622 * Some day this will be a full-fledged user tracking system.. 623 */ 624struct user_struct { 625 atomic_t __count; /* reference count */ 626 atomic_t processes; /* How many processes does this user have? */ 627 atomic_t files; /* How many open files does this user have? */ 628 atomic_t sigpending; /* How many pending signals does this user have? */ 629#ifdef CONFIG_INOTIFY_USER 630 atomic_t inotify_watches; /* How many inotify watches does this user have? */ 631 atomic_t inotify_devs; /* How many inotify devs does this user have opened? */ 632#endif 633#ifdef CONFIG_POSIX_MQUEUE 634 /* protected by mq_lock */ 635 unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ 636#endif 637 unsigned long locked_shm; /* How many pages of mlocked shm ? */ 638 639#ifdef CONFIG_KEYS 640 struct key *uid_keyring; /* UID specific keyring */ 641 struct key *session_keyring; /* UID's default session keyring */ 642#endif 643 644 /* Hash table maintenance information */ 645 struct hlist_node uidhash_node; 646 uid_t uid; 647 648#ifdef CONFIG_USER_SCHED 649 struct task_group *tg; 650#ifdef CONFIG_SYSFS 651 struct kobject kobj; 652 struct work_struct work; 653#endif 654#endif 655}; 656 657extern int uids_sysfs_init(void); 658 659extern struct user_struct *find_user(uid_t); 660 661extern struct user_struct root_user; 662#define INIT_USER (&root_user) 663 664struct backing_dev_info; 665struct reclaim_state; 666 667#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 668struct sched_info { 669 /* cumulative counters */ 670 unsigned long pcount; /* # of times run on this cpu */ 671 unsigned long long cpu_time, /* time spent on the cpu */ 672 run_delay; /* time spent waiting on a runqueue */ 673 674 /* timestamps */ 675 unsigned long long last_arrival,/* when we last ran on a cpu */ 676 last_queued; /* when we were last queued to run */ 677#ifdef CONFIG_SCHEDSTATS 678 /* BKL stats */ 679 unsigned int bkl_count; 680#endif 681}; 682#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ 683 684#ifdef CONFIG_TASK_DELAY_ACCT 685struct task_delay_info { 686 spinlock_t lock; 687 unsigned int flags; /* Private per-task flags */ 688 689 /* For each stat XXX, add following, aligned appropriately 690 * 691 * struct timespec XXX_start, XXX_end; 692 * u64 XXX_delay; 693 * u32 XXX_count; 694 * 695 * Atomicity of updates to XXX_delay, XXX_count protected by 696 * single lock above (split into XXX_lock if contention is an issue). 697 */ 698 699 /* 700 * XXX_count is incremented on every XXX operation, the delay 701 * associated with the operation is added to XXX_delay. 702 * XXX_delay contains the accumulated delay time in nanoseconds. 703 */ 704 struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */ 705 u64 blkio_delay; /* wait for sync block io completion */ 706 u64 swapin_delay; /* wait for swapin block io completion */ 707 u32 blkio_count; /* total count of the number of sync block */ 708 /* io operations performed */ 709 u32 swapin_count; /* total count of the number of swapin block */ 710 /* io operations performed */ 711 712 struct timespec freepages_start, freepages_end; 713 u64 freepages_delay; /* wait for memory reclaim */ 714 u32 freepages_count; /* total count of memory reclaim */ 715}; 716#endif /* CONFIG_TASK_DELAY_ACCT */ 717 718static inline int sched_info_on(void) 719{ 720#ifdef CONFIG_SCHEDSTATS 721 return 1; 722#elif defined(CONFIG_TASK_DELAY_ACCT) 723 extern int delayacct_on; 724 return delayacct_on; 725#else 726 return 0; 727#endif 728} 729 730enum cpu_idle_type { 731 CPU_IDLE, 732 CPU_NOT_IDLE, 733 CPU_NEWLY_IDLE, 734 CPU_MAX_IDLE_TYPES 735}; 736 737/* 738 * sched-domains (multiprocessor balancing) declarations: 739 */ 740 741/* 742 * Increase resolution of nice-level calculations: 743 */ 744#define SCHED_LOAD_SHIFT 10 745#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT) 746 747#define SCHED_LOAD_SCALE_FUZZ SCHED_LOAD_SCALE 748 749#ifdef CONFIG_SMP 750#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */ 751#define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */ 752#define SD_BALANCE_EXEC 4 /* Balance on exec */ 753#define SD_BALANCE_FORK 8 /* Balance on fork, clone */ 754#define SD_WAKE_IDLE 16 /* Wake to idle CPU on task wakeup */ 755#define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */ 756#define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */ 757#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */ 758#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */ 759#define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */ 760#define SD_SERIALIZE 1024 /* Only a single load balancing instance */ 761#define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */ 762 763#define BALANCE_FOR_MC_POWER \ 764 (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0) 765 766#define BALANCE_FOR_PKG_POWER \ 767 ((sched_mc_power_savings || sched_smt_power_savings) ? \ 768 SD_POWERSAVINGS_BALANCE : 0) 769 770#define test_sd_parent(sd, flag) ((sd->parent && \ 771 (sd->parent->flags & flag)) ? 1 : 0) 772 773 774struct sched_group { 775 struct sched_group *next; /* Must be a circular list */ 776 cpumask_t cpumask; 777 778 /* 779 * CPU power of this group, SCHED_LOAD_SCALE being max power for a 780 * single CPU. This is read only (except for setup, hotplug CPU). 781 * Note : Never change cpu_power without recompute its reciprocal 782 */ 783 unsigned int __cpu_power; 784 /* 785 * reciprocal value of cpu_power to avoid expensive divides 786 * (see include/linux/reciprocal_div.h) 787 */ 788 u32 reciprocal_cpu_power; 789}; 790 791enum sched_domain_level { 792 SD_LV_NONE = 0, 793 SD_LV_SIBLING, 794 SD_LV_MC, 795 SD_LV_CPU, 796 SD_LV_NODE, 797 SD_LV_ALLNODES, 798 SD_LV_MAX 799}; 800 801struct sched_domain_attr { 802 int relax_domain_level; 803}; 804 805#define SD_ATTR_INIT (struct sched_domain_attr) { \ 806 .relax_domain_level = -1, \ 807} 808 809struct sched_domain { 810 /* These fields must be setup */ 811 struct sched_domain *parent; /* top domain must be null terminated */ 812 struct sched_domain *child; /* bottom domain must be null terminated */ 813 struct sched_group *groups; /* the balancing groups of the domain */ 814 cpumask_t span; /* span of all CPUs in this domain */ 815 unsigned long min_interval; /* Minimum balance interval ms */ 816 unsigned long max_interval; /* Maximum balance interval ms */ 817 unsigned int busy_factor; /* less balancing by factor if busy */ 818 unsigned int imbalance_pct; /* No balance until over watermark */ 819 unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ 820 unsigned int busy_idx; 821 unsigned int idle_idx; 822 unsigned int newidle_idx; 823 unsigned int wake_idx; 824 unsigned int forkexec_idx; 825 int flags; /* See SD_* */ 826 enum sched_domain_level level; 827 828 /* Runtime fields. */ 829 unsigned long last_balance; /* init to jiffies. units in jiffies */ 830 unsigned int balance_interval; /* initialise to 1. units in ms. */ 831 unsigned int nr_balance_failed; /* initialise to 0 */ 832 833 u64 last_update; 834 835#ifdef CONFIG_SCHEDSTATS 836 /* load_balance() stats */ 837 unsigned int lb_count[CPU_MAX_IDLE_TYPES]; 838 unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; 839 unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; 840 unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; 841 unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; 842 unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; 843 unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; 844 unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; 845 846 /* Active load balancing */ 847 unsigned int alb_count; 848 unsigned int alb_failed; 849 unsigned int alb_pushed; 850 851 /* SD_BALANCE_EXEC stats */ 852 unsigned int sbe_count; 853 unsigned int sbe_balanced; 854 unsigned int sbe_pushed; 855 856 /* SD_BALANCE_FORK stats */ 857 unsigned int sbf_count; 858 unsigned int sbf_balanced; 859 unsigned int sbf_pushed; 860 861 /* try_to_wake_up() stats */ 862 unsigned int ttwu_wake_remote; 863 unsigned int ttwu_move_affine; 864 unsigned int ttwu_move_balance; 865#endif 866#ifdef CONFIG_SCHED_DEBUG 867 char *name; 868#endif 869}; 870 871extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, 872 struct sched_domain_attr *dattr_new); 873extern int arch_reinit_sched_domains(void); 874 875#else /* CONFIG_SMP */ 876 877struct sched_domain_attr; 878 879static inline void 880partition_sched_domains(int ndoms_new, cpumask_t *doms_new, 881 struct sched_domain_attr *dattr_new) 882{ 883} 884#endif /* !CONFIG_SMP */ 885 886struct io_context; /* See blkdev.h */ 887#define NGROUPS_SMALL 32 888#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t))) 889struct group_info { 890 int ngroups; 891 atomic_t usage; 892 gid_t small_block[NGROUPS_SMALL]; 893 int nblocks; 894 gid_t *blocks[0]; 895}; 896 897/* 898 * get_group_info() must be called with the owning task locked (via task_lock()) 899 * when task != current. The reason being that the vast majority of callers are 900 * looking at current->group_info, which can not be changed except by the 901 * current task. Changing current->group_info requires the task lock, too. 902 */ 903#define get_group_info(group_info) do { \ 904 atomic_inc(&(group_info)->usage); \ 905} while (0) 906 907#define put_group_info(group_info) do { \ 908 if (atomic_dec_and_test(&(group_info)->usage)) \ 909 groups_free(group_info); \ 910} while (0) 911 912extern struct group_info *groups_alloc(int gidsetsize); 913extern void groups_free(struct group_info *group_info); 914extern int set_current_groups(struct group_info *group_info); 915extern int groups_search(struct group_info *group_info, gid_t grp); 916/* access the groups "array" with this macro */ 917#define GROUP_AT(gi, i) \ 918 ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK]) 919 920#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK 921extern void prefetch_stack(struct task_struct *t); 922#else 923static inline void prefetch_stack(struct task_struct *t) { } 924#endif 925 926struct audit_context; /* See audit.c */ 927struct mempolicy; 928struct pipe_inode_info; 929struct uts_namespace; 930 931struct rq; 932struct sched_domain; 933 934struct sched_class { 935 const struct sched_class *next; 936 937 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup); 938 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep); 939 void (*yield_task) (struct rq *rq); 940 941 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync); 942 943 struct task_struct * (*pick_next_task) (struct rq *rq); 944 void (*put_prev_task) (struct rq *rq, struct task_struct *p); 945 946#ifdef CONFIG_SMP 947 int (*select_task_rq)(struct task_struct *p, int sync); 948 949 unsigned long (*load_balance) (struct rq *this_rq, int this_cpu, 950 struct rq *busiest, unsigned long max_load_move, 951 struct sched_domain *sd, enum cpu_idle_type idle, 952 int *all_pinned, int *this_best_prio); 953 954 int (*move_one_task) (struct rq *this_rq, int this_cpu, 955 struct rq *busiest, struct sched_domain *sd, 956 enum cpu_idle_type idle); 957 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); 958 void (*post_schedule) (struct rq *this_rq); 959 void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); 960 961 void (*set_cpus_allowed)(struct task_struct *p, 962 const cpumask_t *newmask); 963 964 void (*rq_online)(struct rq *rq); 965 void (*rq_offline)(struct rq *rq); 966#endif 967 968 void (*set_curr_task) (struct rq *rq); 969 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); 970 void (*task_new) (struct rq *rq, struct task_struct *p); 971 972 void (*switched_from) (struct rq *this_rq, struct task_struct *task, 973 int running); 974 void (*switched_to) (struct rq *this_rq, struct task_struct *task, 975 int running); 976 void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 977 int oldprio, int running); 978 979#ifdef CONFIG_FAIR_GROUP_SCHED 980 void (*moved_group) (struct task_struct *p); 981#endif 982}; 983 984struct load_weight { 985 unsigned long weight, inv_weight; 986}; 987 988/* 989 * CFS stats for a schedulable entity (task, task-group etc) 990 * 991 * Current field usage histogram: 992 * 993 * 4 se->block_start 994 * 4 se->run_node 995 * 4 se->sleep_start 996 * 6 se->load.weight 997 */ 998struct sched_entity { 999 struct load_weight load; /* for load-balancing */ 1000 struct rb_node run_node; 1001 struct list_head group_node; 1002 unsigned int on_rq; 1003 1004 u64 exec_start; 1005 u64 sum_exec_runtime; 1006 u64 vruntime; 1007 u64 prev_sum_exec_runtime; 1008 1009 u64 last_wakeup; 1010 u64 avg_overlap; 1011 1012#ifdef CONFIG_SCHEDSTATS 1013 u64 wait_start; 1014 u64 wait_max; 1015 u64 wait_count; 1016 u64 wait_sum; 1017 1018 u64 sleep_start; 1019 u64 sleep_max; 1020 s64 sum_sleep_runtime; 1021 1022 u64 block_start; 1023 u64 block_max; 1024 u64 exec_max; 1025 u64 slice_max; 1026 1027 u64 nr_migrations; 1028 u64 nr_migrations_cold; 1029 u64 nr_failed_migrations_affine; 1030 u64 nr_failed_migrations_running; 1031 u64 nr_failed_migrations_hot; 1032 u64 nr_forced_migrations; 1033 u64 nr_forced2_migrations; 1034 1035 u64 nr_wakeups; 1036 u64 nr_wakeups_sync; 1037 u64 nr_wakeups_migrate; 1038 u64 nr_wakeups_local; 1039 u64 nr_wakeups_remote; 1040 u64 nr_wakeups_affine; 1041 u64 nr_wakeups_affine_attempts; 1042 u64 nr_wakeups_passive; 1043 u64 nr_wakeups_idle; 1044#endif 1045 1046#ifdef CONFIG_FAIR_GROUP_SCHED 1047 struct sched_entity *parent; 1048 /* rq on which this entity is (to be) queued: */ 1049 struct cfs_rq *cfs_rq; 1050 /* rq "owned" by this entity/group: */ 1051 struct cfs_rq *my_q; 1052#endif 1053}; 1054 1055struct sched_rt_entity { 1056 struct list_head run_list; 1057 unsigned long timeout; 1058 unsigned int time_slice; 1059 int nr_cpus_allowed; 1060 1061 struct sched_rt_entity *back; 1062#ifdef CONFIG_RT_GROUP_SCHED 1063 struct sched_rt_entity *parent; 1064 /* rq on which this entity is (to be) queued: */ 1065 struct rt_rq *rt_rq; 1066 /* rq "owned" by this entity/group: */ 1067 struct rt_rq *my_q; 1068#endif 1069}; 1070 1071struct task_struct { 1072 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ 1073 void *stack; 1074 atomic_t usage; 1075 unsigned int flags; /* per process flags, defined below */ 1076 unsigned int ptrace; 1077 1078 int lock_depth; /* BKL lock depth */ 1079 1080#ifdef CONFIG_SMP 1081#ifdef __ARCH_WANT_UNLOCKED_CTXSW 1082 int oncpu; 1083#endif 1084#endif 1085 1086 int prio, static_prio, normal_prio; 1087 unsigned int rt_priority; 1088 const struct sched_class *sched_class; 1089 struct sched_entity se; 1090 struct sched_rt_entity rt; 1091 1092#ifdef CONFIG_PREEMPT_NOTIFIERS 1093 /* list of struct preempt_notifier: */ 1094 struct hlist_head preempt_notifiers; 1095#endif 1096 1097 /* 1098 * fpu_counter contains the number of consecutive context switches 1099 * that the FPU is used. If this is over a threshold, the lazy fpu 1100 * saving becomes unlazy to save the trap. This is an unsigned char 1101 * so that after 256 times the counter wraps and the behavior turns 1102 * lazy again; this to deal with bursty apps that only use FPU for 1103 * a short time 1104 */ 1105 unsigned char fpu_counter; 1106 s8 oomkilladj; /* OOM kill score adjustment (bit shift). */ 1107#ifdef CONFIG_BLK_DEV_IO_TRACE 1108 unsigned int btrace_seq; 1109#endif 1110 1111 unsigned int policy; 1112 cpumask_t cpus_allowed; 1113 1114#ifdef CONFIG_PREEMPT_RCU 1115 int rcu_read_lock_nesting; 1116 int rcu_flipctr_idx; 1117#endif /* #ifdef CONFIG_PREEMPT_RCU */ 1118 1119#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 1120 struct sched_info sched_info; 1121#endif 1122 1123 struct list_head tasks; 1124 1125 struct mm_struct *mm, *active_mm; 1126 1127/* task state */ 1128 struct linux_binfmt *binfmt; 1129 int exit_state; 1130 int exit_code, exit_signal; 1131 int pdeath_signal; /* The signal sent when the parent dies */ 1132 /* ??? */ 1133 unsigned int personality; 1134 unsigned did_exec:1; 1135 pid_t pid; 1136 pid_t tgid; 1137 1138#ifdef CONFIG_CC_STACKPROTECTOR 1139 /* Canary value for the -fstack-protector gcc feature */ 1140 unsigned long stack_canary; 1141#endif 1142 /* 1143 * pointers to (original) parent process, youngest child, younger sibling, 1144 * older sibling, respectively. (p->father can be replaced with 1145 * p->real_parent->pid) 1146 */ 1147 struct task_struct *real_parent; /* real parent process */ 1148 struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */ 1149 /* 1150 * children/sibling forms the list of my natural children 1151 */ 1152 struct list_head children; /* list of my children */ 1153 struct list_head sibling; /* linkage in my parent's children list */ 1154 struct task_struct *group_leader; /* threadgroup leader */ 1155 1156 /* 1157 * ptraced is the list of tasks this task is using ptrace on. 1158 * This includes both natural children and PTRACE_ATTACH targets. 1159 * p->ptrace_entry is p's link on the p->parent->ptraced list. 1160 */ 1161 struct list_head ptraced; 1162 struct list_head ptrace_entry; 1163 1164 /* PID/PID hash table linkage. */ 1165 struct pid_link pids[PIDTYPE_MAX]; 1166 struct list_head thread_group; 1167 1168 struct completion *vfork_done; /* for vfork() */ 1169 int __user *set_child_tid; /* CLONE_CHILD_SETTID */ 1170 int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ 1171 1172 cputime_t utime, stime, utimescaled, stimescaled; 1173 cputime_t gtime; 1174 cputime_t prev_utime, prev_stime; 1175 unsigned long nvcsw, nivcsw; /* context switch counts */ 1176 struct timespec start_time; /* monotonic time */ 1177 struct timespec real_start_time; /* boot based time */ 1178/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ 1179 unsigned long min_flt, maj_flt; 1180 1181 struct task_cputime cputime_expires; 1182 struct list_head cpu_timers[3]; 1183 1184/* process credentials */ 1185 uid_t uid,euid,suid,fsuid; 1186 gid_t gid,egid,sgid,fsgid; 1187 struct group_info *group_info; 1188 kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset; 1189 struct user_struct *user; 1190 unsigned securebits; 1191#ifdef CONFIG_KEYS 1192 unsigned char jit_keyring; /* default keyring to attach requested keys to */ 1193 struct key *request_key_auth; /* assumed request_key authority */ 1194 struct key *thread_keyring; /* keyring private to this thread */ 1195#endif 1196 char comm[TASK_COMM_LEN]; /* executable name excluding path 1197 - access with [gs]et_task_comm (which lock 1198 it with task_lock()) 1199 - initialized normally by flush_old_exec */ 1200/* file system info */ 1201 int link_count, total_link_count; 1202#ifdef CONFIG_SYSVIPC 1203/* ipc stuff */ 1204 struct sysv_sem sysvsem; 1205#endif 1206#ifdef CONFIG_DETECT_SOFTLOCKUP 1207/* hung task detection */ 1208 unsigned long last_switch_timestamp; 1209 unsigned long last_switch_count; 1210#endif 1211/* CPU-specific state of this task */ 1212 struct thread_struct thread; 1213/* filesystem information */ 1214 struct fs_struct *fs; 1215/* open file information */ 1216 struct files_struct *files; 1217/* namespaces */ 1218 struct nsproxy *nsproxy; 1219/* signal handlers */ 1220 struct signal_struct *signal; 1221 struct sighand_struct *sighand; 1222 1223 sigset_t blocked, real_blocked; 1224 sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ 1225 struct sigpending pending; 1226 1227 unsigned long sas_ss_sp; 1228 size_t sas_ss_size; 1229 int (*notifier)(void *priv); 1230 void *notifier_data; 1231 sigset_t *notifier_mask; 1232#ifdef CONFIG_SECURITY 1233 void *security; 1234#endif 1235 struct audit_context *audit_context; 1236#ifdef CONFIG_AUDITSYSCALL 1237 uid_t loginuid; 1238 unsigned int sessionid; 1239#endif 1240 seccomp_t seccomp; 1241 1242/* Thread group tracking */ 1243 u32 parent_exec_id; 1244 u32 self_exec_id; 1245/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */ 1246 spinlock_t alloc_lock; 1247 1248 /* Protection of the PI data structures: */ 1249 spinlock_t pi_lock; 1250 1251#ifdef CONFIG_RT_MUTEXES 1252 /* PI waiters blocked on a rt_mutex held by this task */ 1253 struct plist_head pi_waiters; 1254 /* Deadlock detection and priority inheritance handling */ 1255 struct rt_mutex_waiter *pi_blocked_on; 1256#endif 1257 1258#ifdef CONFIG_DEBUG_MUTEXES 1259 /* mutex deadlock detection */ 1260 struct mutex_waiter *blocked_on; 1261#endif 1262#ifdef CONFIG_TRACE_IRQFLAGS 1263 unsigned int irq_events; 1264 int hardirqs_enabled; 1265 unsigned long hardirq_enable_ip; 1266 unsigned int hardirq_enable_event; 1267 unsigned long hardirq_disable_ip; 1268 unsigned int hardirq_disable_event; 1269 int softirqs_enabled; 1270 unsigned long softirq_disable_ip; 1271 unsigned int softirq_disable_event; 1272 unsigned long softirq_enable_ip; 1273 unsigned int softirq_enable_event; 1274 int hardirq_context; 1275 int softirq_context; 1276#endif 1277#ifdef CONFIG_LOCKDEP 1278# define MAX_LOCK_DEPTH 48UL 1279 u64 curr_chain_key; 1280 int lockdep_depth; 1281 unsigned int lockdep_recursion; 1282 struct held_lock held_locks[MAX_LOCK_DEPTH]; 1283#endif 1284 1285/* journalling filesystem info */ 1286 void *journal_info; 1287 1288/* stacked block device info */ 1289 struct bio *bio_list, **bio_tail; 1290 1291/* VM state */ 1292 struct reclaim_state *reclaim_state; 1293 1294 struct backing_dev_info *backing_dev_info; 1295 1296 struct io_context *io_context; 1297 1298 unsigned long ptrace_message; 1299 siginfo_t *last_siginfo; /* For ptrace use. */ 1300 struct task_io_accounting ioac; 1301#if defined(CONFIG_TASK_XACCT) 1302 u64 acct_rss_mem1; /* accumulated rss usage */ 1303 u64 acct_vm_mem1; /* accumulated virtual memory usage */ 1304 cputime_t acct_timexpd; /* stime + utime since last update */ 1305#endif 1306#ifdef CONFIG_CPUSETS 1307 nodemask_t mems_allowed; 1308 int cpuset_mems_generation; 1309 int cpuset_mem_spread_rotor; 1310#endif 1311#ifdef CONFIG_CGROUPS 1312 /* Control Group info protected by css_set_lock */ 1313 struct css_set *cgroups; 1314 /* cg_list protected by css_set_lock and tsk->alloc_lock */ 1315 struct list_head cg_list; 1316#endif 1317#ifdef CONFIG_FUTEX 1318 struct robust_list_head __user *robust_list; 1319#ifdef CONFIG_COMPAT 1320 struct compat_robust_list_head __user *compat_robust_list; 1321#endif 1322 struct list_head pi_state_list; 1323 struct futex_pi_state *pi_state_cache; 1324#endif 1325#ifdef CONFIG_NUMA 1326 struct mempolicy *mempolicy; 1327 short il_next; 1328#endif 1329 atomic_t fs_excl; /* holding fs exclusive resources */ 1330 struct rcu_head rcu; 1331 1332 /* 1333 * cache last used pipe for splice 1334 */ 1335 struct pipe_inode_info *splice_pipe; 1336#ifdef CONFIG_TASK_DELAY_ACCT 1337 struct task_delay_info *delays; 1338#endif 1339#ifdef CONFIG_FAULT_INJECTION 1340 int make_it_fail; 1341#endif 1342 struct prop_local_single dirties; 1343#ifdef CONFIG_LATENCYTOP 1344 int latency_record_count; 1345 struct latency_record latency_record[LT_SAVECOUNT]; 1346#endif 1347 /* 1348 * time slack values; these are used to round up poll() and 1349 * select() etc timeout values. These are in nanoseconds. 1350 */ 1351 unsigned long timer_slack_ns; 1352 unsigned long default_timer_slack_ns; 1353 1354 struct list_head *scm_work_list; 1355#ifdef CONFIG_FUNCTION_RET_TRACER 1356 /* Index of current stored adress in ret_stack */ 1357 int curr_ret_stack; 1358 /* Stack of return addresses for return function tracing */ 1359 struct ftrace_ret_stack *ret_stack; 1360 /* 1361 * Number of functions that haven't been traced 1362 * because of depth overrun. 1363 */ 1364 atomic_t trace_overrun; 1365#endif 1366}; 1367 1368/* 1369 * Priority of a process goes from 0..MAX_PRIO-1, valid RT 1370 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH 1371 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority 1372 * values are inverted: lower p->prio value means higher priority. 1373 * 1374 * The MAX_USER_RT_PRIO value allows the actual maximum 1375 * RT priority to be separate from the value exported to 1376 * user-space. This allows kernel threads to set their 1377 * priority to a value higher than any user task. Note: 1378 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. 1379 */ 1380 1381#define MAX_USER_RT_PRIO 100 1382#define MAX_RT_PRIO MAX_USER_RT_PRIO 1383 1384#define MAX_PRIO (MAX_RT_PRIO + 40) 1385#define DEFAULT_PRIO (MAX_RT_PRIO + 20) 1386 1387static inline int rt_prio(int prio) 1388{ 1389 if (unlikely(prio < MAX_RT_PRIO)) 1390 return 1; 1391 return 0; 1392} 1393 1394static inline int rt_task(struct task_struct *p) 1395{ 1396 return rt_prio(p->prio); 1397} 1398 1399static inline void set_task_session(struct task_struct *tsk, pid_t session) 1400{ 1401 tsk->signal->__session = session; 1402} 1403 1404static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp) 1405{ 1406 tsk->signal->__pgrp = pgrp; 1407} 1408 1409static inline struct pid *task_pid(struct task_struct *task) 1410{ 1411 return task->pids[PIDTYPE_PID].pid; 1412} 1413 1414static inline struct pid *task_tgid(struct task_struct *task) 1415{ 1416 return task->group_leader->pids[PIDTYPE_PID].pid; 1417} 1418 1419static inline struct pid *task_pgrp(struct task_struct *task) 1420{ 1421 return task->group_leader->pids[PIDTYPE_PGID].pid; 1422} 1423 1424static inline struct pid *task_session(struct task_struct *task) 1425{ 1426 return task->group_leader->pids[PIDTYPE_SID].pid; 1427} 1428 1429struct pid_namespace; 1430 1431/* 1432 * the helpers to get the task's different pids as they are seen 1433 * from various namespaces 1434 * 1435 * task_xid_nr() : global id, i.e. the id seen from the init namespace; 1436 * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of 1437 * current. 1438 * task_xid_nr_ns() : id seen from the ns specified; 1439 * 1440 * set_task_vxid() : assigns a virtual id to a task; 1441 * 1442 * see also pid_nr() etc in include/linux/pid.h 1443 */ 1444 1445static inline pid_t task_pid_nr(struct task_struct *tsk) 1446{ 1447 return tsk->pid; 1448} 1449 1450pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); 1451 1452static inline pid_t task_pid_vnr(struct task_struct *tsk) 1453{ 1454 return pid_vnr(task_pid(tsk)); 1455} 1456 1457 1458static inline pid_t task_tgid_nr(struct task_struct *tsk) 1459{ 1460 return tsk->tgid; 1461} 1462 1463pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); 1464 1465static inline pid_t task_tgid_vnr(struct task_struct *tsk) 1466{ 1467 return pid_vnr(task_tgid(tsk)); 1468} 1469 1470 1471static inline pid_t task_pgrp_nr(struct task_struct *tsk) 1472{ 1473 return tsk->signal->__pgrp; 1474} 1475 1476pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); 1477 1478static inline pid_t task_pgrp_vnr(struct task_struct *tsk) 1479{ 1480 return pid_vnr(task_pgrp(tsk)); 1481} 1482 1483 1484static inline pid_t task_session_nr(struct task_struct *tsk) 1485{ 1486 return tsk->signal->__session; 1487} 1488 1489pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); 1490 1491static inline pid_t task_session_vnr(struct task_struct *tsk) 1492{ 1493 return pid_vnr(task_session(tsk)); 1494} 1495 1496 1497/** 1498 * pid_alive - check that a task structure is not stale 1499 * @p: Task structure to be checked. 1500 * 1501 * Test if a process is not yet dead (at most zombie state) 1502 * If pid_alive fails, then pointers within the task structure 1503 * can be stale and must not be dereferenced. 1504 */ 1505static inline int pid_alive(struct task_struct *p) 1506{ 1507 return p->pids[PIDTYPE_PID].pid != NULL; 1508} 1509 1510/** 1511 * is_global_init - check if a task structure is init 1512 * @tsk: Task structure to be checked. 1513 * 1514 * Check if a task structure is the first user space task the kernel created. 1515 */ 1516static inline int is_global_init(struct task_struct *tsk) 1517{ 1518 return tsk->pid == 1; 1519} 1520 1521/* 1522 * is_container_init: 1523 * check whether in the task is init in its own pid namespace. 1524 */ 1525extern int is_container_init(struct task_struct *tsk); 1526 1527extern struct pid *cad_pid; 1528 1529extern void free_task(struct task_struct *tsk); 1530#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) 1531 1532extern void __put_task_struct(struct task_struct *t); 1533 1534static inline void put_task_struct(struct task_struct *t) 1535{ 1536 if (atomic_dec_and_test(&t->usage)) 1537 __put_task_struct(t); 1538} 1539 1540extern cputime_t task_utime(struct task_struct *p); 1541extern cputime_t task_stime(struct task_struct *p); 1542extern cputime_t task_gtime(struct task_struct *p); 1543 1544/* 1545 * Per process flags 1546 */ 1547#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */ 1548 /* Not implemented yet, only for 486*/ 1549#define PF_STARTING 0x00000002 /* being created */ 1550#define PF_EXITING 0x00000004 /* getting shut down */ 1551#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ 1552#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ 1553#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ 1554#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ 1555#define PF_DUMPCORE 0x00000200 /* dumped core */ 1556#define PF_SIGNALED 0x00000400 /* killed by a signal */ 1557#define PF_MEMALLOC 0x00000800 /* Allocating memory */ 1558#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */ 1559#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ 1560#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ 1561#define PF_FROZEN 0x00010000 /* frozen for system suspend */ 1562#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ 1563#define PF_KSWAPD 0x00040000 /* I am kswapd */ 1564#define PF_SWAPOFF 0x00080000 /* I am in swapoff */ 1565#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ 1566#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ 1567#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ 1568#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ 1569#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ 1570#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ 1571#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */ 1572#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ 1573#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ 1574#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ 1575#define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ 1576 1577/* 1578 * Only the _current_ task can read/write to tsk->flags, but other 1579 * tasks can access tsk->flags in readonly mode for example 1580 * with tsk_used_math (like during threaded core dumping). 1581 * There is however an exception to this rule during ptrace 1582 * or during fork: the ptracer task is allowed to write to the 1583 * child->flags of its traced child (same goes for fork, the parent 1584 * can write to the child->flags), because we're guaranteed the 1585 * child is not running and in turn not changing child->flags 1586 * at the same time the parent does it. 1587 */ 1588#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) 1589#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) 1590#define clear_used_math() clear_stopped_child_used_math(current) 1591#define set_used_math() set_stopped_child_used_math(current) 1592#define conditional_stopped_child_used_math(condition, child) \ 1593 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) 1594#define conditional_used_math(condition) \ 1595 conditional_stopped_child_used_math(condition, current) 1596#define copy_to_stopped_child_used_math(child) \ 1597 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) 1598/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ 1599#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 1600#define used_math() tsk_used_math(current) 1601 1602#ifdef CONFIG_SMP 1603extern int set_cpus_allowed_ptr(struct task_struct *p, 1604 const cpumask_t *new_mask); 1605#else 1606static inline int set_cpus_allowed_ptr(struct task_struct *p, 1607 const cpumask_t *new_mask) 1608{ 1609 if (!cpu_isset(0, *new_mask)) 1610 return -EINVAL; 1611 return 0; 1612} 1613#endif 1614static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) 1615{ 1616 return set_cpus_allowed_ptr(p, &new_mask); 1617} 1618 1619extern unsigned long long sched_clock(void); 1620 1621extern void sched_clock_init(void); 1622extern u64 sched_clock_cpu(int cpu); 1623 1624#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 1625static inline void sched_clock_tick(void) 1626{ 1627} 1628 1629static inline void sched_clock_idle_sleep_event(void) 1630{ 1631} 1632 1633static inline void sched_clock_idle_wakeup_event(u64 delta_ns) 1634{ 1635} 1636#else 1637extern void sched_clock_tick(void); 1638extern void sched_clock_idle_sleep_event(void); 1639extern void sched_clock_idle_wakeup_event(u64 delta_ns); 1640#endif 1641 1642/* 1643 * For kernel-internal use: high-speed (but slightly incorrect) per-cpu 1644 * clock constructed from sched_clock(): 1645 */ 1646extern unsigned long long cpu_clock(int cpu); 1647 1648extern unsigned long long 1649task_sched_runtime(struct task_struct *task); 1650extern unsigned long long thread_group_sched_runtime(struct task_struct *task); 1651 1652/* sched_exec is called by processes performing an exec */ 1653#ifdef CONFIG_SMP 1654extern void sched_exec(void); 1655#else 1656#define sched_exec() {} 1657#endif 1658 1659extern void sched_clock_idle_sleep_event(void); 1660extern void sched_clock_idle_wakeup_event(u64 delta_ns); 1661 1662#ifdef CONFIG_HOTPLUG_CPU 1663extern void idle_task_exit(void); 1664#else 1665static inline void idle_task_exit(void) {} 1666#endif 1667 1668extern void sched_idle_next(void); 1669 1670#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP) 1671extern void wake_up_idle_cpu(int cpu); 1672#else 1673static inline void wake_up_idle_cpu(int cpu) { } 1674#endif 1675 1676#ifdef CONFIG_SCHED_DEBUG 1677extern unsigned int sysctl_sched_latency; 1678extern unsigned int sysctl_sched_min_granularity; 1679extern unsigned int sysctl_sched_wakeup_granularity; 1680extern unsigned int sysctl_sched_child_runs_first; 1681extern unsigned int sysctl_sched_features; 1682extern unsigned int sysctl_sched_migration_cost; 1683extern unsigned int sysctl_sched_nr_migrate; 1684extern unsigned int sysctl_sched_shares_ratelimit; 1685extern unsigned int sysctl_sched_shares_thresh; 1686 1687int sched_nr_latency_handler(struct ctl_table *table, int write, 1688 struct file *file, void __user *buffer, size_t *length, 1689 loff_t *ppos); 1690#endif 1691extern unsigned int sysctl_sched_rt_period; 1692extern int sysctl_sched_rt_runtime; 1693 1694int sched_rt_handler(struct ctl_table *table, int write, 1695 struct file *filp, void __user *buffer, size_t *lenp, 1696 loff_t *ppos); 1697 1698extern unsigned int sysctl_sched_compat_yield; 1699 1700#ifdef CONFIG_RT_MUTEXES 1701extern int rt_mutex_getprio(struct task_struct *p); 1702extern void rt_mutex_setprio(struct task_struct *p, int prio); 1703extern void rt_mutex_adjust_pi(struct task_struct *p); 1704#else 1705static inline int rt_mutex_getprio(struct task_struct *p) 1706{ 1707 return p->normal_prio; 1708} 1709# define rt_mutex_adjust_pi(p) do { } while (0) 1710#endif 1711 1712extern void set_user_nice(struct task_struct *p, long nice); 1713extern int task_prio(const struct task_struct *p); 1714extern int task_nice(const struct task_struct *p); 1715extern int can_nice(const struct task_struct *p, const int nice); 1716extern int task_curr(const struct task_struct *p); 1717extern int idle_cpu(int cpu); 1718extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); 1719extern int sched_setscheduler_nocheck(struct task_struct *, int, 1720 struct sched_param *); 1721extern struct task_struct *idle_task(int cpu); 1722extern struct task_struct *curr_task(int cpu); 1723extern void set_curr_task(int cpu, struct task_struct *p); 1724 1725void yield(void); 1726 1727/* 1728 * The default (Linux) execution domain. 1729 */ 1730extern struct exec_domain default_exec_domain; 1731 1732union thread_union { 1733 struct thread_info thread_info; 1734 unsigned long stack[THREAD_SIZE/sizeof(long)]; 1735}; 1736 1737#ifndef __HAVE_ARCH_KSTACK_END 1738static inline int kstack_end(void *addr) 1739{ 1740 /* Reliable end of stack detection: 1741 * Some APM bios versions misalign the stack 1742 */ 1743 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); 1744} 1745#endif 1746 1747extern union thread_union init_thread_union; 1748extern struct task_struct init_task; 1749 1750extern struct mm_struct init_mm; 1751 1752extern struct pid_namespace init_pid_ns; 1753 1754/* 1755 * find a task by one of its numerical ids 1756 * 1757 * find_task_by_pid_type_ns(): 1758 * it is the most generic call - it finds a task by all id, 1759 * type and namespace specified 1760 * find_task_by_pid_ns(): 1761 * finds a task by its pid in the specified namespace 1762 * find_task_by_vpid(): 1763 * finds a task by its virtual pid 1764 * 1765 * see also find_vpid() etc in include/linux/pid.h 1766 */ 1767 1768extern struct task_struct *find_task_by_pid_type_ns(int type, int pid, 1769 struct pid_namespace *ns); 1770 1771extern struct task_struct *find_task_by_vpid(pid_t nr); 1772extern struct task_struct *find_task_by_pid_ns(pid_t nr, 1773 struct pid_namespace *ns); 1774 1775extern void __set_special_pids(struct pid *pid); 1776 1777/* per-UID process charging. */ 1778extern struct user_struct * alloc_uid(struct user_namespace *, uid_t); 1779static inline struct user_struct *get_uid(struct user_struct *u) 1780{ 1781 atomic_inc(&u->__count); 1782 return u; 1783} 1784extern void free_uid(struct user_struct *); 1785extern void switch_uid(struct user_struct *); 1786extern void release_uids(struct user_namespace *ns); 1787 1788#include <asm/current.h> 1789 1790extern void do_timer(unsigned long ticks); 1791 1792extern int wake_up_state(struct task_struct *tsk, unsigned int state); 1793extern int wake_up_process(struct task_struct *tsk); 1794extern void wake_up_new_task(struct task_struct *tsk, 1795 unsigned long clone_flags); 1796#ifdef CONFIG_SMP 1797 extern void kick_process(struct task_struct *tsk); 1798#else 1799 static inline void kick_process(struct task_struct *tsk) { } 1800#endif 1801extern void sched_fork(struct task_struct *p, int clone_flags); 1802extern void sched_dead(struct task_struct *p); 1803 1804extern int in_group_p(gid_t); 1805extern int in_egroup_p(gid_t); 1806 1807extern void proc_caches_init(void); 1808extern void flush_signals(struct task_struct *); 1809extern void ignore_signals(struct task_struct *); 1810extern void flush_signal_handlers(struct task_struct *, int force_default); 1811extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); 1812 1813static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) 1814{ 1815 unsigned long flags; 1816 int ret; 1817 1818 spin_lock_irqsave(&tsk->sighand->siglock, flags); 1819 ret = dequeue_signal(tsk, mask, info); 1820 spin_unlock_irqrestore(&tsk->sighand->siglock, flags); 1821 1822 return ret; 1823} 1824 1825extern void block_all_signals(int (*notifier)(void *priv), void *priv, 1826 sigset_t *mask); 1827extern void unblock_all_signals(void); 1828extern void release_task(struct task_struct * p); 1829extern int send_sig_info(int, struct siginfo *, struct task_struct *); 1830extern int force_sigsegv(int, struct task_struct *); 1831extern int force_sig_info(int, struct siginfo *, struct task_struct *); 1832extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); 1833extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); 1834extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32); 1835extern int kill_pgrp(struct pid *pid, int sig, int priv); 1836extern int kill_pid(struct pid *pid, int sig, int priv); 1837extern int kill_proc_info(int, struct siginfo *, pid_t); 1838extern int do_notify_parent(struct task_struct *, int); 1839extern void force_sig(int, struct task_struct *); 1840extern void force_sig_specific(int, struct task_struct *); 1841extern int send_sig(int, struct task_struct *, int); 1842extern void zap_other_threads(struct task_struct *p); 1843extern struct sigqueue *sigqueue_alloc(void); 1844extern void sigqueue_free(struct sigqueue *); 1845extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); 1846extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); 1847extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long); 1848 1849static inline int kill_cad_pid(int sig, int priv) 1850{ 1851 return kill_pid(cad_pid, sig, priv); 1852} 1853 1854/* These can be the second arg to send_sig_info/send_group_sig_info. */ 1855#define SEND_SIG_NOINFO ((struct siginfo *) 0) 1856#define SEND_SIG_PRIV ((struct siginfo *) 1) 1857#define SEND_SIG_FORCED ((struct siginfo *) 2) 1858 1859static inline int is_si_special(const struct siginfo *info) 1860{ 1861 return info <= SEND_SIG_FORCED; 1862} 1863 1864/* True if we are on the alternate signal stack. */ 1865 1866static inline int on_sig_stack(unsigned long sp) 1867{ 1868 return (sp - current->sas_ss_sp < current->sas_ss_size); 1869} 1870 1871static inline int sas_ss_flags(unsigned long sp) 1872{ 1873 return (current->sas_ss_size == 0 ? SS_DISABLE 1874 : on_sig_stack(sp) ? SS_ONSTACK : 0); 1875} 1876 1877/* 1878 * Routines for handling mm_structs 1879 */ 1880extern struct mm_struct * mm_alloc(void); 1881 1882/* mmdrop drops the mm and the page tables */ 1883extern void __mmdrop(struct mm_struct *); 1884static inline void mmdrop(struct mm_struct * mm) 1885{ 1886 if (unlikely(atomic_dec_and_test(&mm->mm_count))) 1887 __mmdrop(mm); 1888} 1889 1890/* mmput gets rid of the mappings and all user-space */ 1891extern void mmput(struct mm_struct *); 1892/* Grab a reference to a task's mm, if it is not already going away */ 1893extern struct mm_struct *get_task_mm(struct task_struct *task); 1894/* Remove the current tasks stale references to the old mm_struct */ 1895extern void mm_release(struct task_struct *, struct mm_struct *); 1896/* Allocate a new mm structure and copy contents from tsk->mm */ 1897extern struct mm_struct *dup_mm(struct task_struct *tsk); 1898 1899extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); 1900extern void flush_thread(void); 1901extern void exit_thread(void); 1902 1903extern void exit_files(struct task_struct *); 1904extern void __cleanup_signal(struct signal_struct *); 1905extern void __cleanup_sighand(struct sighand_struct *); 1906 1907extern void exit_itimers(struct signal_struct *); 1908extern void flush_itimer_signals(void); 1909 1910extern NORET_TYPE void do_group_exit(int); 1911 1912extern void daemonize(const char *, ...); 1913extern int allow_signal(int); 1914extern int disallow_signal(int); 1915 1916extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *); 1917extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); 1918struct task_struct *fork_idle(int); 1919 1920extern void set_task_comm(struct task_struct *tsk, char *from); 1921extern char *get_task_comm(char *to, struct task_struct *tsk); 1922 1923#ifdef CONFIG_SMP 1924extern unsigned long wait_task_inactive(struct task_struct *, long match_state); 1925#else 1926static inline unsigned long wait_task_inactive(struct task_struct *p, 1927 long match_state) 1928{ 1929 return 1; 1930} 1931#endif 1932 1933#define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks) 1934 1935#define for_each_process(p) \ 1936 for (p = &init_task ; (p = next_task(p)) != &init_task ; ) 1937 1938/* 1939 * Careful: do_each_thread/while_each_thread is a double loop so 1940 * 'break' will not work as expected - use goto instead. 1941 */ 1942#define do_each_thread(g, t) \ 1943 for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do 1944 1945#define while_each_thread(g, t) \ 1946 while ((t = next_thread(t)) != g) 1947 1948/* de_thread depends on thread_group_leader not being a pid based check */ 1949#define thread_group_leader(p) (p == p->group_leader) 1950 1951/* Do to the insanities of de_thread it is possible for a process 1952 * to have the pid of the thread group leader without actually being 1953 * the thread group leader. For iteration through the pids in proc 1954 * all we care about is that we have a task with the appropriate 1955 * pid, we don't actually care if we have the right task. 1956 */ 1957static inline int has_group_leader_pid(struct task_struct *p) 1958{ 1959 return p->pid == p->tgid; 1960} 1961 1962static inline 1963int same_thread_group(struct task_struct *p1, struct task_struct *p2) 1964{ 1965 return p1->tgid == p2->tgid; 1966} 1967 1968static inline struct task_struct *next_thread(const struct task_struct *p) 1969{ 1970 return list_entry(rcu_dereference(p->thread_group.next), 1971 struct task_struct, thread_group); 1972} 1973 1974static inline int thread_group_empty(struct task_struct *p) 1975{ 1976 return list_empty(&p->thread_group); 1977} 1978 1979#define delay_group_leader(p) \ 1980 (thread_group_leader(p) && !thread_group_empty(p)) 1981 1982/* 1983 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring 1984 * subscriptions and synchronises with wait4(). Also used in procfs. Also 1985 * pins the final release of task.io_context. Also protects ->cpuset and 1986 * ->cgroup.subsys[]. 1987 * 1988 * Nests both inside and outside of read_lock(&tasklist_lock). 1989 * It must not be nested with write_lock_irq(&tasklist_lock), 1990 * neither inside nor outside. 1991 */ 1992static inline void task_lock(struct task_struct *p) 1993{ 1994 spin_lock(&p->alloc_lock); 1995} 1996 1997static inline void task_unlock(struct task_struct *p) 1998{ 1999 spin_unlock(&p->alloc_lock); 2000} 2001 2002extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk, 2003 unsigned long *flags); 2004 2005static inline void unlock_task_sighand(struct task_struct *tsk, 2006 unsigned long *flags) 2007{ 2008 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); 2009} 2010 2011#ifndef __HAVE_THREAD_FUNCTIONS 2012 2013#define task_thread_info(task) ((struct thread_info *)(task)->stack) 2014#define task_stack_page(task) ((task)->stack) 2015 2016static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) 2017{ 2018 *task_thread_info(p) = *task_thread_info(org); 2019 task_thread_info(p)->task = p; 2020} 2021 2022static inline unsigned long *end_of_stack(struct task_struct *p) 2023{ 2024 return (unsigned long *)(task_thread_info(p) + 1); 2025} 2026 2027#endif 2028 2029static inline int object_is_on_stack(void *obj) 2030{ 2031 void *stack = task_stack_page(current); 2032 2033 return (obj >= stack) && (obj < (stack + THREAD_SIZE)); 2034} 2035 2036extern void thread_info_cache_init(void); 2037 2038/* set thread flags in other task's structures 2039 * - see asm/thread_info.h for TIF_xxxx flags available 2040 */ 2041static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) 2042{ 2043 set_ti_thread_flag(task_thread_info(tsk), flag); 2044} 2045 2046static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) 2047{ 2048 clear_ti_thread_flag(task_thread_info(tsk), flag); 2049} 2050 2051static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) 2052{ 2053 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); 2054} 2055 2056static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) 2057{ 2058 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); 2059} 2060 2061static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) 2062{ 2063 return test_ti_thread_flag(task_thread_info(tsk), flag); 2064} 2065 2066static inline void set_tsk_need_resched(struct task_struct *tsk) 2067{ 2068 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 2069} 2070 2071static inline void clear_tsk_need_resched(struct task_struct *tsk) 2072{ 2073 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 2074} 2075 2076static inline int test_tsk_need_resched(struct task_struct *tsk) 2077{ 2078 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); 2079} 2080 2081static inline int signal_pending(struct task_struct *p) 2082{ 2083 return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); 2084} 2085 2086extern int __fatal_signal_pending(struct task_struct *p); 2087 2088static inline int fatal_signal_pending(struct task_struct *p) 2089{ 2090 return signal_pending(p) && __fatal_signal_pending(p); 2091} 2092 2093static inline int signal_pending_state(long state, struct task_struct *p) 2094{ 2095 if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) 2096 return 0; 2097 if (!signal_pending(p)) 2098 return 0; 2099 2100 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); 2101} 2102 2103static inline int need_resched(void) 2104{ 2105 return unlikely(test_thread_flag(TIF_NEED_RESCHED)); 2106} 2107 2108/* 2109 * cond_resched() and cond_resched_lock(): latency reduction via 2110 * explicit rescheduling in places that are safe. The return 2111 * value indicates whether a reschedule was done in fact. 2112 * cond_resched_lock() will drop the spinlock before scheduling, 2113 * cond_resched_softirq() will enable bhs before scheduling. 2114 */ 2115extern int _cond_resched(void); 2116#ifdef CONFIG_PREEMPT_BKL 2117static inline int cond_resched(void) 2118{ 2119 return 0; 2120} 2121#else 2122static inline int cond_resched(void) 2123{ 2124 return _cond_resched(); 2125} 2126#endif 2127extern int cond_resched_lock(spinlock_t * lock); 2128extern int cond_resched_softirq(void); 2129static inline int cond_resched_bkl(void) 2130{ 2131 return _cond_resched(); 2132} 2133 2134/* 2135 * Does a critical section need to be broken due to another 2136 * task waiting?: (technically does not depend on CONFIG_PREEMPT, 2137 * but a general need for low latency) 2138 */ 2139static inline int spin_needbreak(spinlock_t *lock) 2140{ 2141#ifdef CONFIG_PREEMPT 2142 return spin_is_contended(lock); 2143#else 2144 return 0; 2145#endif 2146} 2147 2148/* 2149 * Thread group CPU time accounting. 2150 */ 2151 2152extern int thread_group_cputime_alloc(struct task_struct *); 2153extern void thread_group_cputime(struct task_struct *, struct task_cputime *); 2154 2155static inline void thread_group_cputime_init(struct signal_struct *sig) 2156{ 2157 sig->cputime.totals = NULL; 2158} 2159 2160static inline int thread_group_cputime_clone_thread(struct task_struct *curr) 2161{ 2162 if (curr->signal->cputime.totals) 2163 return 0; 2164 return thread_group_cputime_alloc(curr); 2165} 2166 2167static inline void thread_group_cputime_free(struct signal_struct *sig) 2168{ 2169 free_percpu(sig->cputime.totals); 2170} 2171 2172/* 2173 * Reevaluate whether the task has signals pending delivery. 2174 * Wake the task if so. 2175 * This is required every time the blocked sigset_t changes. 2176 * callers must hold sighand->siglock. 2177 */ 2178extern void recalc_sigpending_and_wake(struct task_struct *t); 2179extern void recalc_sigpending(void); 2180 2181extern void signal_wake_up(struct task_struct *t, int resume_stopped); 2182 2183/* 2184 * Wrappers for p->thread_info->cpu access. No-op on UP. 2185 */ 2186#ifdef CONFIG_SMP 2187 2188static inline unsigned int task_cpu(const struct task_struct *p) 2189{ 2190 return task_thread_info(p)->cpu; 2191} 2192 2193extern void set_task_cpu(struct task_struct *p, unsigned int cpu); 2194 2195#else 2196 2197static inline unsigned int task_cpu(const struct task_struct *p) 2198{ 2199 return 0; 2200} 2201 2202static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) 2203{ 2204} 2205 2206#endif /* CONFIG_SMP */ 2207 2208extern void arch_pick_mmap_layout(struct mm_struct *mm); 2209 2210#ifdef CONFIG_TRACING 2211extern void 2212__trace_special(void *__tr, void *__data, 2213 unsigned long arg1, unsigned long arg2, unsigned long arg3); 2214#else 2215static inline void 2216__trace_special(void *__tr, void *__data, 2217 unsigned long arg1, unsigned long arg2, unsigned long arg3) 2218{ 2219} 2220#endif 2221 2222extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask); 2223extern long sched_getaffinity(pid_t pid, cpumask_t *mask); 2224 2225extern int sched_mc_power_savings, sched_smt_power_savings; 2226 2227extern void normalize_rt_tasks(void); 2228 2229#ifdef CONFIG_GROUP_SCHED 2230 2231extern struct task_group init_task_group; 2232#ifdef CONFIG_USER_SCHED 2233extern struct task_group root_task_group; 2234#endif 2235 2236extern struct task_group *sched_create_group(struct task_group *parent); 2237extern void sched_destroy_group(struct task_group *tg); 2238extern void sched_move_task(struct task_struct *tsk); 2239#ifdef CONFIG_FAIR_GROUP_SCHED 2240extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); 2241extern unsigned long sched_group_shares(struct task_group *tg); 2242#endif 2243#ifdef CONFIG_RT_GROUP_SCHED 2244extern int sched_group_set_rt_runtime(struct task_group *tg, 2245 long rt_runtime_us); 2246extern long sched_group_rt_runtime(struct task_group *tg); 2247extern int sched_group_set_rt_period(struct task_group *tg, 2248 long rt_period_us); 2249extern long sched_group_rt_period(struct task_group *tg); 2250#endif 2251#endif 2252 2253#ifdef CONFIG_TASK_XACCT 2254static inline void add_rchar(struct task_struct *tsk, ssize_t amt) 2255{ 2256 tsk->ioac.rchar += amt; 2257} 2258 2259static inline void add_wchar(struct task_struct *tsk, ssize_t amt) 2260{ 2261 tsk->ioac.wchar += amt; 2262} 2263 2264static inline void inc_syscr(struct task_struct *tsk) 2265{ 2266 tsk->ioac.syscr++; 2267} 2268 2269static inline void inc_syscw(struct task_struct *tsk) 2270{ 2271 tsk->ioac.syscw++; 2272} 2273#else 2274static inline void add_rchar(struct task_struct *tsk, ssize_t amt) 2275{ 2276} 2277 2278static inline void add_wchar(struct task_struct *tsk, ssize_t amt) 2279{ 2280} 2281 2282static inline void inc_syscr(struct task_struct *tsk) 2283{ 2284} 2285 2286static inline void inc_syscw(struct task_struct *tsk) 2287{ 2288} 2289#endif 2290 2291#ifndef TASK_SIZE_OF 2292#define TASK_SIZE_OF(tsk) TASK_SIZE 2293#endif 2294 2295#ifdef CONFIG_MM_OWNER 2296extern void mm_update_next_owner(struct mm_struct *mm); 2297extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); 2298#else 2299static inline void mm_update_next_owner(struct mm_struct *mm) 2300{ 2301} 2302 2303static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p) 2304{ 2305} 2306#endif /* CONFIG_MM_OWNER */ 2307 2308#define TASK_STATE_TO_CHAR_STR "RSDTtZX" 2309 2310#endif /* __KERNEL__ */ 2311 2312#endif 2313