sched.h revision 0326f5a94ddea33fa331b2519f4172f4fb387baa
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * cloning flags:
 */
#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD	0x00010000	/* Same thread group? */
#define CLONE_NEWNS	0x00020000	/* New namespace group? */
#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
   and is now available for re-use. */
#define CLONE_NEWUTS		0x04000000	/* New utsname group? */
#define CLONE_NEWIPC		0x08000000	/* New ipcs */
#define CLONE_NEWUSER		0x10000000	/* New user namespace */
#define CLONE_NEWPID		0x20000000	/* New pid namespace */
#define CLONE_NEWNET		0x40000000	/* New network namespace */
#define CLONE_IO		0x80000000	/* Clone io context */
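/*
 * Illustrative sketch (not part of the original header): userspace thread
 * libraries combine these flags to create a thread that shares almost
 * everything with its creator. The combination below is the one glibc's
 * pthread_create() is commonly documented to pass to clone(); it is shown
 * here only as an example of how the flags compose.
 *
 *	const int thread_flags = CLONE_VM | CLONE_FS | CLONE_FILES |
 *				 CLONE_SIGHAND | CLONE_THREAD |
 *				 CLONE_SYSVSEM | CLONE_SETTLS |
 *				 CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
 */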

/*
 * Scheduling policies
 */
#define SCHED_NORMAL		0
#define SCHED_FIFO		1
#define SCHED_RR		2
#define SCHED_BATCH		3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE		5
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK	0x40000000

#ifdef __KERNEL__

struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
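/*
 * Worked example (illustrative, not part of the original header): with
 * FSHIFT = 11 the fixed-point "1.0" is FIXED_1 = 2048. Suppose the
 * one-minute average is currently 1.0 (load = 2048) and n, the runnable
 * task count scaled by FIXED_1, is 2.0 (n = 4096). One step of
 * CALC_LOAD(load, EXP_1, n) computes:
 *
 *	load = (2048 * 1884 + 4096 * (2048 - 1884)) >> 11
 *	     = (3858432 + 671744) >> 11
 *	     = 2212			(~1.08 in fixed-point)
 *
 * i.e. the average decays exponentially toward n, moving a fraction
 * (FIXED_1 - EXP_1)/FIXED_1 ~= 8% of the way on each 5-second interval.
 */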

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);


extern void calc_global_load(unsigned long ticks);

extern unsigned long get_parent_ip(unsigned long addr);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_STATE_MAX		512

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"

extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_dead(task)	((task)->exit_state != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0)

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)	\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))
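/*
 * Illustrative sketch (not part of the original header): the barrier in
 * set_current_state() pairs with the waker's write of the wakeup
 * condition. A hypothetical waiter/waker pair using a plain flag:
 *
 *	// waiter
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	while (!done) {
 *		schedule();
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 *	// waker
 *	done = 1;
 *	wake_up_process(waiter_task);
 *
 * Without the barrier, the waiter's state write could be reordered past
 * its read of "done" and the wakeup could be missed.
 */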

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern void select_nohz_load_balancer(int stop_tick);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void select_nohz_load_balancer(int stop_tick) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 void __user *buffer,
					 size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
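/*
 * Illustrative sketch (not part of the original header): a caller that
 * wants to sleep for about a second sets its state first and then calls
 * schedule_timeout() with a jiffies-based timeout:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);	// ~1 second
 *
 * A timeout of MAX_SCHEDULE_TIMEOUT means "no timeout"; helpers such as
 * schedule_timeout_interruptible() set the task state on the caller's
 * behalf before sleeping.
 */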

struct nsproxy;
struct user_namespace;

/*
 * Default maximum number of active map areas, this limits the number of vmas
 * per mm struct. Users can override this limit via sysctl, but there is a
 * caveat.
 *
 * When a program's coredump is generated in ELF format, one section is
 * created per vma. In ELF, the number of sections is represented as an
 * unsigned short, so the number of sections must be smaller than 65535 at
 * coredump time. Because the kernel adds some informative sections to the
 * program image when generating a coredump, we need some margin. The number
 * of extra sections is currently 1-3 and depends on the arch. We use "5" as
 * a safe margin here.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

#include <linux/aio.h>

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif


extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE      0  /* core dump is permitted */
#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */

#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
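/*
 * Worked example (illustrative, not part of the original header):
 * assuming CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is enabled,
 * MMF_DUMP_FILTER_DEFAULT is
 *
 *	(1 << 2) | (1 << 3) | (1 << 7) | (1 << 6) = 0xcc
 *
 * i.e. private/shared anonymous memory, private hugetlb mappings and ELF
 * headers are dumped by default, while file-backed mappings are not. The
 * same bits, shifted down by MMF_DUMP_FILTER_SHIFT, are what userspace
 * reads and writes through /proc/<pid>/coredump_filter (0xcc >> 2 = 0x33).
 */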

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are
 * tracked for threads and thread groups.  Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = 0,					\
		.stime = 0,					\
		.sum_exec_runtime = 0,				\
	}
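/*
 * Illustrative sketch (not part of the original header): accumulating
 * per-thread times into a task_cputime aggregate, e.g. when summing a
 * thread group. Variable names are hypothetical, and cputime_t
 * arithmetic is shown as plain addition for brevity.
 *
 *	struct task_cputime totals = INIT_CPUTIME;
 *
 *	totals.utime += t->utime;
 *	totals.stime += t->stime;
 *	totals.sum_exec_runtime += t->se.sum_exec_runtime;
 */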

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 *			@cputime receives updates.
 * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	raw_spinlock_t lock;
};

#include <linux/rwsem.h>
struct autogroup;

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		sigcnt;
	atomic_t		live;
	int			nr_threads;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	cputime_t prev_utime, prev_stime;
#endif
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time of dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif
#ifdef CONFIG_CGROUPS
	/*
	 * group_rwsem prevents new tasks from entering the threadgroup and
	 * member tasks from exiting; more specifically, setting of
	 * PF_EXITING.  fork and exit paths are protected with this rwsem
	 * using threadgroup_change_begin/end().  Users which require
	 * threadgroup to remain stable should use threadgroup_[un]lock()
	 * which also takes care of exec path.  Currently, cgroup is the
	 * only user.
	 */
	struct rw_semaphore group_rwsem;
#endif

	int oom_adj;		/* OOM kill score adjustment (bit shift) */
	int oom_score_adj;	/* OOM kill score adjustment */
	int oom_score_adj_min;	/* OOM kill score adjustment minimum value.
				 * Only settable by CAP_SYS_RESOURCE. */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace) */
};
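/*
 * Illustrative sketch (not part of the original header): per the rlim
 * comment above, a reader that only needs one word of an rlimit can load
 * it without locking, while an updater takes task_lock() on the group
 * leader. Names here are hypothetical.
 *
 *	unsigned long cur = task->signal->rlim[RLIMIT_NOFILE].rlim_cur;
 *
 *	task_lock(task->group_leader);
 *	task->signal->rlim[RLIMIT_NOFILE] = new_rlim;
 *	task_unlock(task->group_leader);
 */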

/* Context switch must be unlocked if interrupts are to be enabled */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
# define __ARCH_WANT_UNLOCKED_CTXSW
#endif

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	uid_t uid;
	struct user_namespace *user_ns;

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(uid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	struct timespec freepages_start, freepages_end;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */
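/*
 * Illustrative sketch (not part of the original header): given the
 * counter/accumulator pairs above, a consumer such as the delay
 * accounting code can derive a mean delay per operation (values are in
 * nanoseconds):
 *
 *	u64 mean_blkio_delay = 0;
 *
 *	if (delays->blkio_count)
 *		mean_blkio_delay = delays->blkio_delay / delays->blkio_count;
 */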

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION	10
# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION	0
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

/*
 * Increase resolution of cpu_power calculations
 */
#define SCHED_POWER_SHIFT	10
#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)
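/*
 * Worked example (illustrative, not part of the original header): with
 * the increased resolution enabled, SCHED_LOAD_RESOLUTION is 10, so a
 * nice-0 task weight of 1024 becomes
 *
 *	scale_load(1024) = 1024 << 10 = 1048576
 *
 * internally, and scale_load_down() recovers the user-visible value.
 * With resolution disabled (the current default above), both macros are
 * identity operations and SCHED_LOAD_SCALE is 1 << 10 = 1024.
 */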

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010	/* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_PREFER_LOCAL		0x0040	/* Prefer to keep tasks local to this domain */
#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
#define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */

enum powersavings_balance_level {
	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
					 * first for long running threads
					 */
	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
					 * cpu package for power savings
					 */
	MAX_POWERSAVINGS_BALANCE_LEVELS
};

extern int sched_mc_power_savings, sched_smt_power_savings;

static inline int sd_balance_for_mc_power(void)
{
	if (sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	if (!sched_mc_power_savings)
		return SD_PREFER_SIBLING;

	return 0;
}

static inline int sd_balance_for_package_power(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	return SD_PREFER_SIBLING;
}

extern int __weak arch_sd_sibiling_asym_packing(void);

/*
 * Optimise SD flags for power savings:
 * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
 * Keep default SD flags if sched_{smt,mc}_power_saving=0
 */

static inline int sd_power_saving_flags(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_BALANCE_NEWIDLE;

	return 0;
}

struct sched_group_power {
	atomic_t ref;
	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU.
	 */
	unsigned int power, power_orig;
	/*
	 * Number of busy cpus in this group.
	 */
	atomic_t nr_busy_cpus;
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_power *sgp;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}
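/*
 * Illustrative sketch (not part of the original header): because
 * cpumask[0] is a variable-length tail, a sched_group is allocated with
 * extra space for the mask rather than sizeof(struct sched_group) alone,
 * roughly:
 *
 *	struct sched_group *sg;
 *
 *	sg = kzalloc(sizeof(struct sched_group) + cpumask_size(),
 *		     GFP_KERNEL);
 *	if (sg)
 *		cpumask_copy(sched_group_cpus(sg), cpu_map);
 *
 * "cpu_map" is a hypothetical source mask; cpumask_size() sizes the tail
 * for the number of possible CPUs.
 */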

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	u64 last_update;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
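/*
 * Illustrative sketch (not part of the original header): a caller such as
 * the cpuset code builds one mask per requested partition and hands the
 * array to partition_sched_domains():
 *
 *	cpumask_var_t *doms = alloc_sched_domains(1);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], cpu_active_mask);
 *		partition_sched_domains(1, doms, NULL);
 *	}
 *
 * On success the scheduler keeps the array, so the caller does not free it.
 */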

/* Test a flag in parent sched domain */
static inline int test_sd_parent(struct sched_domain *sd, int flag)
{
	if (sd->parent && (sd->parent->flags & flag))
		return 1;

	return 0;
}

unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}
#endif	/* !CONFIG_SMP */


struct io_context;			/* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct rq;
struct sched_domain;

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x04		/* internal use, task got migrated */

#define ENQUEUE_WAKEUP		1
#define ENQUEUE_HEAD		2
#ifdef CONFIG_SMP
#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
#else
#define ENQUEUE_WAKING		0
#endif

#define DEQUEUE_SLEEP		1

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	struct task_struct * (*pick_next_task) (struct rq *rq);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);

	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
	void (*post_schedule) (struct rq *this_rq);
	void (*task_waking) (struct task_struct *task);
	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);

	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};

struct load_weight {
	unsigned long weight, inv_weight;
};
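/*
 * Illustrative sketch (not part of the original header): the core
 * scheduler walks the singly-linked class list (highest priority class
 * first, via the ->next pointer above) and takes the first task any
 * class offers; the shape of the loop in kernel/sched/ is roughly:
 *
 *	const struct sched_class *class;
 *	struct task_struct *p;
 *
 *	for (class = sched_class_highest; class; class = class->next) {
 *		p = class->pick_next_task(rq);
 *		if (p)
 *			return p;
 *	}
 */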

#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;
	u64			iowait_count;
	u64			iowait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned int time_slice;
	int nr_cpus_allowed;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};

struct rcu_node;

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

	/*
	 * fpu_counter contains the number of consecutive context switches
	 * during which the FPU is used. If this is over a threshold, the
	 * lazy FPU saving becomes unlazy to save the trap. This is an
	 * unsigned char so that after 256 times the counter wraps and the
	 * behavior turns lazy again; this is to deal with bursty apps that
	 * only use the FPU for a short time.
	 */
	unsigned char fpu_counter;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	char rcu_read_unlock_special;
	struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rcu_boost_mutex;
#endif /* #ifdef CONFIG_RCU_BOOST */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
#endif

	struct mm_struct *mm, *active_mm;
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif
#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat	rss_stat;
#endif
/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
	/* ??? */
	unsigned int personality;
	unsigned did_exec:1;
	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
				 * execve */
	unsigned in_iowait:1;


	/* Revert to default priority/policy when forking */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif

	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	cputime_t prev_utime, prev_stime;
#endif
	unsigned long nvcsw, nivcsw;	/* context switch counts */
	struct timespec start_time;		/* monotonic time */
	struct timespec real_start_time;	/* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];

/* process credentials */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					 * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */

	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by setup_new_exec */
/* file system info */
	int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;
	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	uid_t loginuid;
	unsigned int sessionid;
#endif
	seccomp_t seccomp;

/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
	spinlock_t alloc_lock;

#ifdef CONFIG_GENERIC_HARDIRQS
	/* IRQ handler threads */
	struct irqaction *irqaction;
#endif

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct plist_head pi_waiters;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
	struct blk_plug *plug;
#endif

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo;	/* For ptrace use. */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	cputime_t acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	int mems_allowed_change_disable;
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short il_next;
	short pref_node_fork;
#endif
	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;
#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	/*
	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for some dirty throttling pause
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	unsigned long dirty_paused_when; /* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	unsigned long timer_slack_ns;
	unsigned long default_timer_slack_ns;
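	/*
	 * Illustrative note (not part of the original header): timer slack
	 * lets unrelated wakeups be coalesced. A task that sets a slack of
	 * 1 ms, e.g. via prctl(PR_SET_TIMERSLACK, 1000000), allows a 10 ms
	 * poll() timeout to expire anywhere in the [10 ms, 11 ms] window,
	 * so nearby timers can fire together.
	 */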
	struct list_head *scm_work_list;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	*ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask and counter of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
	struct memcg_batch_info {
		int do_batch;	/* incremented when batch uncharge started */
		struct mem_cgroup *memcg; /* target memcg of uncharge */
		unsigned long nr_pages;	/* uncharged usage */
		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
	} memcg_batch;
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	atomic_t ptrace_bp_refcnt;
#endif
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
	int uprobe_srcu_id;
#endif
};

/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

/*
 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
 * values are inverted: lower p->prio value means higher priority.
 *
 * The MAX_USER_RT_PRIO value allows the actual maximum
 * RT priority to be separate from the value exported to
 * user-space.  This allows kernel threads to set their
 * priority to a value higher than any user task. Note:
 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
 */

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO

#define MAX_PRIO		(MAX_RT_PRIO + 40)
#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)

static inline int rt_prio(int prio)
{
	if (unlikely(prio < MAX_RT_PRIO))
		return 1;
	return 0;
}

static inline int rt_task(struct task_struct *p)
{
	return rt_prio(p->prio);
}
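/*
 * Worked example (illustrative, not part of the original header): with
 * MAX_RT_PRIO = 100 the priority ranges look like this:
 *
 *	p->prio   0 ..  99	realtime (SCHED_FIFO/SCHED_RR); a
 *				userspace rt priority of 1..99 maps to
 *				p->prio = MAX_RT_PRIO-1 - rt_priority
 *	p->prio 100 .. 139	SCHED_NORMAL/SCHED_BATCH; nice -20..+19
 *				maps to p->prio = DEFAULT_PRIO + nice,
 *				so nice 0 lands on 120
 */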

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

struct pid_namespace;

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}


static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
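/*
 * Illustrative sketch (not part of the original header): for a task
 * running inside a pid namespace, the same task has different numeric
 * ids depending on the observer:
 *
 *	pid_t global = task_pid_nr(tsk);   // e.g. 4211, seen from init ns
 *	pid_t local  = task_pid_vnr(tsk);  // e.g. 7, seen from current's ns
 *
 * The concrete numbers are hypothetical; getpid() in the task itself
 * reports the id relative to the task's own namespace.
 */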
1762 */ 1763extern int is_container_init(struct task_struct *tsk); 1764 1765extern struct pid *cad_pid; 1766 1767extern void free_task(struct task_struct *tsk); 1768#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) 1769 1770extern void __put_task_struct(struct task_struct *t); 1771 1772static inline void put_task_struct(struct task_struct *t) 1773{ 1774 if (atomic_dec_and_test(&t->usage)) 1775 __put_task_struct(t); 1776} 1777 1778extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st); 1779extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st); 1780 1781/* 1782 * Per process flags 1783 */ 1784#define PF_STARTING 0x00000002 /* being created */ 1785#define PF_EXITING 0x00000004 /* getting shut down */ 1786#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ 1787#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ 1788#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ 1789#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ 1790#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ 1791#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ 1792#define PF_DUMPCORE 0x00000200 /* dumped core */ 1793#define PF_SIGNALED 0x00000400 /* killed by a signal */ 1794#define PF_MEMALLOC 0x00000800 /* Allocating memory */ 1795#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ 1796#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ 1797#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ 1798#define PF_FROZEN 0x00010000 /* frozen for system suspend */ 1799#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ 1800#define PF_KSWAPD 0x00040000 /* I am kswapd */ 1801#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ 1802#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ 1803#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ 1804#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ 1805#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ 1806#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ 1807#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */ 1808#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 1809#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ 1810#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ 1811#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ 1812 1813/* 1814 * Only the _current_ task can read/write to tsk->flags, but other 1815 * tasks can access tsk->flags in readonly mode for example 1816 * with tsk_used_math (like during threaded core dumping). 1817 * There is however an exception to this rule during ptrace 1818 * or during fork: the ptracer task is allowed to write to the 1819 * child->flags of its traced child (same goes for fork, the parent 1820 * can write to the child->flags), because we're guaranteed the 1821 * child is not running and in turn not changing child->flags 1822 * at the same time the parent does it. 
1823 */ 1824#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) 1825#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) 1826#define clear_used_math() clear_stopped_child_used_math(current) 1827#define set_used_math() set_stopped_child_used_math(current) 1828#define conditional_stopped_child_used_math(condition, child) \ 1829 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) 1830#define conditional_used_math(condition) \ 1831 conditional_stopped_child_used_math(condition, current) 1832#define copy_to_stopped_child_used_math(child) \ 1833 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) 1834/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ 1835#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 1836#define used_math() tsk_used_math(current) 1837 1838/* 1839 * task->jobctl flags 1840 */ 1841#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ 1842 1843#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ 1844#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ 1845#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ 1846#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ 1847#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ 1848#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ 1849#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ 1850 1851#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT) 1852#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT) 1853#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT) 1854#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT) 1855#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT) 1856#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT) 1857#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT) 1858 1859#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) 1860#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) 1861 1862extern bool task_set_jobctl_pending(struct task_struct *task, 1863 unsigned int mask); 1864extern void task_clear_jobctl_trapping(struct task_struct *task); 1865extern void task_clear_jobctl_pending(struct task_struct *task, 1866 unsigned int mask); 1867 1868#ifdef CONFIG_PREEMPT_RCU 1869 1870#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ 1871#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */ 1872#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. 

static inline void rcu_copy_process(struct task_struct *p)
{
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special = 0;
#ifdef CONFIG_TREE_PREEMPT_RCU
	p->rcu_blocked_node = NULL;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
	p->rcu_boost_mutex = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	INIT_LIST_HEAD(&p->rcu_node_entry);
}

#else

static inline void rcu_copy_process(struct task_struct *p)
{
}

#endif

#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
				const struct cpumask *new_mask);

extern int set_cpus_allowed_ptr(struct task_struct *p,
				const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
				       const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

#ifndef CONFIG_CPUMASK_OFFSTACK
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
	return set_cpus_allowed_ptr(p, &new_mask);
}
#endif

/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs, and using it (which you should not) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);
/*
 * See the comment in kernel/sched_clock.c
 */
extern u64 cpu_clock(int cpu);
extern u64 local_clock(void);
extern u64 sched_clock_cpu(int cpu);


extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
#else
/*
 * Architectures can set this to 1 if they have specified
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
 * but then during bootup it turns out that sched_clock()
 * is reliable after all:
 */
extern int sched_clock_stable;

extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An interface for opting in to IRQ time accounting, based off of
 * sched_clock, at runtime.  The opt-in is explicit so that systems with
 * slow sched_clock implementations do not pay the performance penalty.
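 *
 * Illustrative only (a sketch; "arch_sched_clock_is_fast" is a
 * hypothetical predicate, not an existing kernel symbol): an
 * architecture might opt in from its clock setup code:
 *
 *	if (arch_sched_clock_is_fast())
 *		enable_sched_clock_irqtime();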
1972 */ 1973extern void enable_sched_clock_irqtime(void); 1974extern void disable_sched_clock_irqtime(void); 1975#else 1976static inline void enable_sched_clock_irqtime(void) {} 1977static inline void disable_sched_clock_irqtime(void) {} 1978#endif 1979 1980extern unsigned long long 1981task_sched_runtime(struct task_struct *task); 1982 1983/* sched_exec is called by processes performing an exec */ 1984#ifdef CONFIG_SMP 1985extern void sched_exec(void); 1986#else 1987#define sched_exec() {} 1988#endif 1989 1990extern void sched_clock_idle_sleep_event(void); 1991extern void sched_clock_idle_wakeup_event(u64 delta_ns); 1992 1993#ifdef CONFIG_HOTPLUG_CPU 1994extern void idle_task_exit(void); 1995#else 1996static inline void idle_task_exit(void) {} 1997#endif 1998 1999#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP) 2000extern void wake_up_idle_cpu(int cpu); 2001#else 2002static inline void wake_up_idle_cpu(int cpu) { } 2003#endif 2004 2005extern unsigned int sysctl_sched_latency; 2006extern unsigned int sysctl_sched_min_granularity; 2007extern unsigned int sysctl_sched_wakeup_granularity; 2008extern unsigned int sysctl_sched_child_runs_first; 2009 2010enum sched_tunable_scaling { 2011 SCHED_TUNABLESCALING_NONE, 2012 SCHED_TUNABLESCALING_LOG, 2013 SCHED_TUNABLESCALING_LINEAR, 2014 SCHED_TUNABLESCALING_END, 2015}; 2016extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; 2017 2018#ifdef CONFIG_SCHED_DEBUG 2019extern unsigned int sysctl_sched_migration_cost; 2020extern unsigned int sysctl_sched_nr_migrate; 2021extern unsigned int sysctl_sched_time_avg; 2022extern unsigned int sysctl_timer_migration; 2023extern unsigned int sysctl_sched_shares_window; 2024 2025int sched_proc_update_handler(struct ctl_table *table, int write, 2026 void __user *buffer, size_t *length, 2027 loff_t *ppos); 2028#endif 2029#ifdef CONFIG_SCHED_DEBUG 2030static inline unsigned int get_sysctl_timer_migration(void) 2031{ 2032 return sysctl_timer_migration; 2033} 2034#else 2035static inline unsigned int get_sysctl_timer_migration(void) 2036{ 2037 return 1; 2038} 2039#endif 2040extern unsigned int sysctl_sched_rt_period; 2041extern int sysctl_sched_rt_runtime; 2042 2043int sched_rt_handler(struct ctl_table *table, int write, 2044 void __user *buffer, size_t *lenp, 2045 loff_t *ppos); 2046 2047#ifdef CONFIG_SCHED_AUTOGROUP 2048extern unsigned int sysctl_sched_autogroup_enabled; 2049 2050extern void sched_autogroup_create_attach(struct task_struct *p); 2051extern void sched_autogroup_detach(struct task_struct *p); 2052extern void sched_autogroup_fork(struct signal_struct *sig); 2053extern void sched_autogroup_exit(struct signal_struct *sig); 2054#ifdef CONFIG_PROC_FS 2055extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); 2056extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice); 2057#endif 2058#else 2059static inline void sched_autogroup_create_attach(struct task_struct *p) { } 2060static inline void sched_autogroup_detach(struct task_struct *p) { } 2061static inline void sched_autogroup_fork(struct signal_struct *sig) { } 2062static inline void sched_autogroup_exit(struct signal_struct *sig) { } 2063#endif 2064 2065#ifdef CONFIG_CFS_BANDWIDTH 2066extern unsigned int sysctl_sched_cfs_bandwidth_slice; 2067#endif 2068 2069#ifdef CONFIG_RT_MUTEXES 2070extern int rt_mutex_getprio(struct task_struct *p); 2071extern void rt_mutex_setprio(struct task_struct *p, int prio); 2072extern void rt_mutex_adjust_pi(struct task_struct *p); 2073#else 2074static inline int 
{
	return p->normal_prio;
}
# define rt_mutex_adjust_pi(p)	do { } while (0)
#endif

extern bool yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
extern int task_nice(const struct task_struct *p);
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int,
			      const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
				      const struct sched_param *);
extern struct task_struct *idle_task(int cpu);
/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return p->pid == 0;
}
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);

void yield(void);

/*
 * The default (Linux) execution domain.
 */
extern struct exec_domain	default_exec_domain;

union thread_union {
	struct thread_info thread_info;
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * some APM BIOS versions misalign the stack.
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct mm_struct init_mm;

extern struct pid_namespace init_pid_ns;

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
					       struct pid_namespace *ns);

extern void __set_special_pids(struct pid *pid);

/* per-UID process charging.
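 *
 * Illustrative lifetime sketch (not a quote of any caller; error
 * handling is omitted and "ns"/"uid" are assumed to be in scope):
 *
 *	struct user_struct *u = alloc_uid(ns, uid);	(reference held)
 *	get_uid(u);					(take an extra ref)
 *	free_uid(u);					(drop the extra ref)
 *	free_uid(u);					(drop the last ref)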
 */
extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_inc(&u->__count);
	return u;
}
extern void free_uid(struct user_struct *);
extern void release_uids(struct user_namespace *ns);

#include <asm/current.h>

extern void xtime_update(unsigned long ticks);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void sched_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void __flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	ret = dequeue_signal(tsk, mask, info);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

	return ret;
}

extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct *p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
				 const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO	((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

/*
 * True if we are on the alternate signal stack.
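 *
 * An illustrative sketch, loosely modelled on arch signal-frame setup
 * (not a definitive quote of any architecture; "ka" and "sp" are the
 * usual k_sigaction and stack pointer of such code):
 *
 *	if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(sp) == 0)
 *		sp = current->sas_ss_sp + current->sas_ss_size;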
 */
static inline int on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
/* Allocate a new mm structure and copy contents from tsk->mm */
extern struct mm_struct *dup_mm(struct task_struct *tsk);

extern int copy_thread(unsigned long, unsigned long, unsigned long,
		       struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_sighand(struct sighand_struct *);

extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);

extern void do_group_exit(int);

extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);

extern int do_execve(const char *,
		     const char __user * const __user *,
		     const char __user * const __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
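 *
 * An illustrative traversal (a sketch; callers normally hold
 * read_lock(&tasklist_lock) across the whole walk):
 *
 *	do_each_thread(g, t) {
 *		if (t == target)
 *			goto found;
 *	} while_each_thread(g, t);
 *	return;
 * found:
 *	...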
2326 */ 2327#define do_each_thread(g, t) \ 2328 for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do 2329 2330#define while_each_thread(g, t) \ 2331 while ((t = next_thread(t)) != g) 2332 2333static inline int get_nr_threads(struct task_struct *tsk) 2334{ 2335 return tsk->signal->nr_threads; 2336} 2337 2338static inline bool thread_group_leader(struct task_struct *p) 2339{ 2340 return p->exit_signal >= 0; 2341} 2342 2343/* Do to the insanities of de_thread it is possible for a process 2344 * to have the pid of the thread group leader without actually being 2345 * the thread group leader. For iteration through the pids in proc 2346 * all we care about is that we have a task with the appropriate 2347 * pid, we don't actually care if we have the right task. 2348 */ 2349static inline int has_group_leader_pid(struct task_struct *p) 2350{ 2351 return p->pid == p->tgid; 2352} 2353 2354static inline 2355int same_thread_group(struct task_struct *p1, struct task_struct *p2) 2356{ 2357 return p1->tgid == p2->tgid; 2358} 2359 2360static inline struct task_struct *next_thread(const struct task_struct *p) 2361{ 2362 return list_entry_rcu(p->thread_group.next, 2363 struct task_struct, thread_group); 2364} 2365 2366static inline int thread_group_empty(struct task_struct *p) 2367{ 2368 return list_empty(&p->thread_group); 2369} 2370 2371#define delay_group_leader(p) \ 2372 (thread_group_leader(p) && !thread_group_empty(p)) 2373 2374/* 2375 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring 2376 * subscriptions and synchronises with wait4(). Also used in procfs. Also 2377 * pins the final release of task.io_context. Also protects ->cpuset and 2378 * ->cgroup.subsys[]. 2379 * 2380 * Nests both inside and outside of read_lock(&tasklist_lock). 2381 * It must not be nested with write_lock_irq(&tasklist_lock), 2382 * neither inside nor outside. 2383 */ 2384static inline void task_lock(struct task_struct *p) 2385{ 2386 spin_lock(&p->alloc_lock); 2387} 2388 2389static inline void task_unlock(struct task_struct *p) 2390{ 2391 spin_unlock(&p->alloc_lock); 2392} 2393 2394extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, 2395 unsigned long *flags); 2396 2397#define lock_task_sighand(tsk, flags) \ 2398({ struct sighand_struct *__ss; \ 2399 __cond_lock(&(tsk)->sighand->siglock, \ 2400 (__ss = __lock_task_sighand(tsk, flags))); \ 2401 __ss; \ 2402}) \ 2403 2404static inline void unlock_task_sighand(struct task_struct *tsk, 2405 unsigned long *flags) 2406{ 2407 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); 2408} 2409 2410#ifdef CONFIG_CGROUPS 2411static inline void threadgroup_change_begin(struct task_struct *tsk) 2412{ 2413 down_read(&tsk->signal->group_rwsem); 2414} 2415static inline void threadgroup_change_end(struct task_struct *tsk) 2416{ 2417 up_read(&tsk->signal->group_rwsem); 2418} 2419 2420/** 2421 * threadgroup_lock - lock threadgroup 2422 * @tsk: member task of the threadgroup to lock 2423 * 2424 * Lock the threadgroup @tsk belongs to. No new task is allowed to enter 2425 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or 2426 * perform exec. This is useful for cases where the threadgroup needs to 2427 * stay stable across blockable operations. 2428 * 2429 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for 2430 * synchronization. While held, no new task will be added to threadgroup 2431 * and no existing live task will have its PF_EXITING set. 
2432 * 2433 * During exec, a task goes and puts its thread group through unusual 2434 * changes. After de-threading, exclusive access is assumed to resources 2435 * which are usually shared by tasks in the same group - e.g. sighand may 2436 * be replaced with a new one. Also, the exec'ing task takes over group 2437 * leader role including its pid. Exclude these changes while locked by 2438 * grabbing cred_guard_mutex which is used to synchronize exec path. 2439 */ 2440static inline void threadgroup_lock(struct task_struct *tsk) 2441{ 2442 /* 2443 * exec uses exit for de-threading nesting group_rwsem inside 2444 * cred_guard_mutex. Grab cred_guard_mutex first. 2445 */ 2446 mutex_lock(&tsk->signal->cred_guard_mutex); 2447 down_write(&tsk->signal->group_rwsem); 2448} 2449 2450/** 2451 * threadgroup_unlock - unlock threadgroup 2452 * @tsk: member task of the threadgroup to unlock 2453 * 2454 * Reverse threadgroup_lock(). 2455 */ 2456static inline void threadgroup_unlock(struct task_struct *tsk) 2457{ 2458 up_write(&tsk->signal->group_rwsem); 2459 mutex_unlock(&tsk->signal->cred_guard_mutex); 2460} 2461#else 2462static inline void threadgroup_change_begin(struct task_struct *tsk) {} 2463static inline void threadgroup_change_end(struct task_struct *tsk) {} 2464static inline void threadgroup_lock(struct task_struct *tsk) {} 2465static inline void threadgroup_unlock(struct task_struct *tsk) {} 2466#endif 2467 2468#ifndef __HAVE_THREAD_FUNCTIONS 2469 2470#define task_thread_info(task) ((struct thread_info *)(task)->stack) 2471#define task_stack_page(task) ((task)->stack) 2472 2473static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) 2474{ 2475 *task_thread_info(p) = *task_thread_info(org); 2476 task_thread_info(p)->task = p; 2477} 2478 2479static inline unsigned long *end_of_stack(struct task_struct *p) 2480{ 2481 return (unsigned long *)(task_thread_info(p) + 1); 2482} 2483 2484#endif 2485 2486static inline int object_is_on_stack(void *obj) 2487{ 2488 void *stack = task_stack_page(current); 2489 2490 return (obj >= stack) && (obj < (stack + THREAD_SIZE)); 2491} 2492 2493extern void thread_info_cache_init(void); 2494 2495#ifdef CONFIG_DEBUG_STACK_USAGE 2496static inline unsigned long stack_not_used(struct task_struct *p) 2497{ 2498 unsigned long *n = end_of_stack(p); 2499 2500 do { /* Skip over canary */ 2501 n++; 2502 } while (!*n); 2503 2504 return (unsigned long)n - (unsigned long)end_of_stack(p); 2505} 2506#endif 2507 2508/* set thread flags in other task's structures 2509 * - see asm/thread_info.h for TIF_xxxx flags available 2510 */ 2511static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) 2512{ 2513 set_ti_thread_flag(task_thread_info(tsk), flag); 2514} 2515 2516static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) 2517{ 2518 clear_ti_thread_flag(task_thread_info(tsk), flag); 2519} 2520 2521static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) 2522{ 2523 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); 2524} 2525 2526static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) 2527{ 2528 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); 2529} 2530 2531static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) 2532{ 2533 return test_ti_thread_flag(task_thread_info(tsk), flag); 2534} 2535 2536static inline void set_tsk_need_resched(struct task_struct *tsk) 2537{ 2538 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 2539} 2540 
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe.  The return
 * value indicates whether a reschedule was in fact done.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
extern int _cond_resched(void);

#define cond_resched() ({			\
	__might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET	0
#endif

#define cond_resched_lock(lock) ({				\
	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

/*
 * Does a critical section need to be broken due to another task waiting?
 * (Technically this does not depend on CONFIG_PREEMPT, but it expresses
 * a general need for low latency.)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

static inline void thread_group_cputime_init(struct signal_struct *sig)
{
	raw_spin_lock_init(&sig->cputimer.lock);
}

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up(struct task_struct *t, int resume_stopped);

/*
 * Wrappers for p->thread_info->cpu access.  No-op on UP.
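 *
 * A stable reading requires that the task cannot migrate underneath the
 * caller.  An illustrative sketch for one's own CPU (not a rule this
 * header enforces):
 *
 *	preempt_disable();
 *	cpu = task_cpu(current);	(current's CPU cannot change here)
 *	...
 *	preempt_enable();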
2659 */ 2660#ifdef CONFIG_SMP 2661 2662static inline unsigned int task_cpu(const struct task_struct *p) 2663{ 2664 return task_thread_info(p)->cpu; 2665} 2666 2667extern void set_task_cpu(struct task_struct *p, unsigned int cpu); 2668 2669#else 2670 2671static inline unsigned int task_cpu(const struct task_struct *p) 2672{ 2673 return 0; 2674} 2675 2676static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) 2677{ 2678} 2679 2680#endif /* CONFIG_SMP */ 2681 2682extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); 2683extern long sched_getaffinity(pid_t pid, struct cpumask *mask); 2684 2685extern void normalize_rt_tasks(void); 2686 2687#ifdef CONFIG_CGROUP_SCHED 2688 2689extern struct task_group root_task_group; 2690 2691extern struct task_group *sched_create_group(struct task_group *parent); 2692extern void sched_destroy_group(struct task_group *tg); 2693extern void sched_move_task(struct task_struct *tsk); 2694#ifdef CONFIG_FAIR_GROUP_SCHED 2695extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); 2696extern unsigned long sched_group_shares(struct task_group *tg); 2697#endif 2698#ifdef CONFIG_RT_GROUP_SCHED 2699extern int sched_group_set_rt_runtime(struct task_group *tg, 2700 long rt_runtime_us); 2701extern long sched_group_rt_runtime(struct task_group *tg); 2702extern int sched_group_set_rt_period(struct task_group *tg, 2703 long rt_period_us); 2704extern long sched_group_rt_period(struct task_group *tg); 2705extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); 2706#endif 2707#endif 2708 2709extern int task_can_switch_user(struct user_struct *up, 2710 struct task_struct *tsk); 2711 2712#ifdef CONFIG_TASK_XACCT 2713static inline void add_rchar(struct task_struct *tsk, ssize_t amt) 2714{ 2715 tsk->ioac.rchar += amt; 2716} 2717 2718static inline void add_wchar(struct task_struct *tsk, ssize_t amt) 2719{ 2720 tsk->ioac.wchar += amt; 2721} 2722 2723static inline void inc_syscr(struct task_struct *tsk) 2724{ 2725 tsk->ioac.syscr++; 2726} 2727 2728static inline void inc_syscw(struct task_struct *tsk) 2729{ 2730 tsk->ioac.syscw++; 2731} 2732#else 2733static inline void add_rchar(struct task_struct *tsk, ssize_t amt) 2734{ 2735} 2736 2737static inline void add_wchar(struct task_struct *tsk, ssize_t amt) 2738{ 2739} 2740 2741static inline void inc_syscr(struct task_struct *tsk) 2742{ 2743} 2744 2745static inline void inc_syscw(struct task_struct *tsk) 2746{ 2747} 2748#endif 2749 2750#ifndef TASK_SIZE_OF 2751#define TASK_SIZE_OF(tsk) TASK_SIZE 2752#endif 2753 2754#ifdef CONFIG_MM_OWNER 2755extern void mm_update_next_owner(struct mm_struct *mm); 2756extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); 2757#else 2758static inline void mm_update_next_owner(struct mm_struct *mm) 2759{ 2760} 2761 2762static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p) 2763{ 2764} 2765#endif /* CONFIG_MM_OWNER */ 2766 2767static inline unsigned long task_rlimit(const struct task_struct *tsk, 2768 unsigned int limit) 2769{ 2770 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur); 2771} 2772 2773static inline unsigned long task_rlimit_max(const struct task_struct *tsk, 2774 unsigned int limit) 2775{ 2776 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max); 2777} 2778 2779static inline unsigned long rlimit(unsigned int limit) 2780{ 2781 return task_rlimit(current, limit); 2782} 2783 2784static inline unsigned long rlimit_max(unsigned int limit) 2785{ 2786 return task_rlimit_max(current, 

#endif /* __KERNEL__ */

#endif /* _LINUX_SCHED_H */