#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>


struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/plist.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt_mask.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <linux/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/magic.h>

#include <asm/processor.h>

#define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */

/*
 * Extended scheduling parameters data structure.
 *
 * This is needed because the original struct sched_param cannot be
 * altered without introducing ABI issues with legacy applications
 * (e.g., in sched_getparam()).
 *
 * However, the possibility of specifying more than just a priority for
 * the tasks may be useful for a wide variety of application fields, e.g.,
 * multimedia, streaming, automation and control, and many others.
 *
 * This variant (sched_attr) is meant to describe a so-called
 * sporadic time-constrained task. In such a model a task is specified by:
 *  - the activation period or minimum instance inter-arrival time;
 *  - the maximum (or average, depending on the actual scheduling
 *    discipline) computation time of all instances, a.k.a. runtime;
 *  - the deadline (relative to the actual activation time) of each
 *    instance.
 * Very briefly, a periodic (sporadic) task asks for the execution of
 * some specific computation --which is typically called an instance--
 * (at most) every period. Moreover, each instance typically lasts no more
 * than the runtime and must be completed by time instant t equal to
 * the instance activation time + the deadline.
 *
 * This is reflected by the actual fields of the sched_attr structure:
 *
 *  @size		size of the structure, for fwd/bwd compat.
 *
 *  @sched_policy	task's scheduling policy
 *  @sched_flags	for customizing the scheduler behaviour
 *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
 *  @sched_priority	task's static priority (SCHED_FIFO/RR)
 *  @sched_deadline	representative of the task's deadline
 *  @sched_runtime	representative of the task's runtime
 *  @sched_period	representative of the task's period
 *
 * Given this task model, there is a multiplicity of scheduling algorithms
 * and policies that can be used to ensure all the tasks will make their
 * timing constraints.
 *
 * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
 * only user of this new interface. More information about the algorithm
 * is available in the scheduling class file or in Documentation/.
 */
struct sched_attr {
	u32 size;

	u32 sched_policy;
	u64 sched_flags;

	/* SCHED_NORMAL, SCHED_BATCH */
	s32 sched_nice;

	/* SCHED_FIFO, SCHED_RR */
	u32 sched_priority;

	/* SCHED_DEADLINE */
	u64 sched_runtime;
	u64 sched_deadline;
	u64 sched_period;
};
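/*
 * For illustration only (not part of this header): a minimal sketch of
 * requesting SCHED_DEADLINE through this structure, e.g. from userspace
 * via the sched_setattr() syscall. The values are arbitrary -- a 10ms
 * runtime budget every 100ms period, with deadline == period, all in ns:
 *
 *	struct sched_attr attr = {
 *		.size           = sizeof(attr),
 *		.sched_policy   = SCHED_DEADLINE,
 *		.sched_runtime  =  10 * 1000 * 1000,
 *		.sched_deadline = 100 * 1000 * 1000,
 *		.sched_period   = 100 * 1000 * 1000,
 *	};
 *
 *	if (sched_setattr(getpid(), &attr, 0))
 *		perror("sched_setattr");
 */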

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct filename;

#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];	/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT	11		/* nr of bits of precision */
#define FIXED_1	(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ (5*HZ+1)	/* 5 sec intervals */
#define EXP_1	1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5	2014		/* 1/exp(5sec/5min) */
#define EXP_15	2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
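
/*
 * Worked example (illustrative only): with FSHIFT = 11, FIXED_1 = 2048
 * represents 1.0, so a one-minute load of 0.50 is stored as 1024. The
 * global load code pre-scales the active task count by FIXED_1 (so 2
 * runnable tasks give n = 4096), and one CALC_LOAD() step then yields:
 *
 *	load = (1024 * 1884 + 4096 * (2048 - 1884)) >> 11
 *	     = (1929216 + 671744) >> 11
 *	     = 1270		(i.e. ~0.62)
 *
 * Readers usually decode such a value with helpers along the lines of:
 *
 *	#define LOAD_INT(x)  ((x) >> FSHIFT)
 *	#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)
 */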

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);

extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);

extern unsigned long get_parent_ip(unsigned long addr);

extern void dump_cpu_task(int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_STATE_MAX		1024

#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"

extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task) \
	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
	 (task->flags & PF_FROZEN) == 0)

#define __set_task_state(tsk, state_value) \
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value) \
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use
 * __set_current_state().
 */
#define __set_current_state(state_value) \
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value) \
	set_mb(current->state, (state_value))

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(int pinned);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
static inline int get_nohz_timer_target(int pinned)
{
	return smp_processor_id();
}
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for
 * the current task), SP is the stack pointer of the first frame that should
 * be shown in the backtrace (or NULL if the entire call-chain of the task
 * should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched __attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define MAX_SCHEDULE_TIMEOUT LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
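
/*
 * For illustration only: the usual pattern for a timed sleep with these
 * helpers is to publish the sleep state first, then call
 * schedule_timeout(), which returns the remaining jiffies (0 on full
 * expiry). A minimal sketch, sleeping for up to one second:
 *
 *	signed long remaining;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *	if (remaining)
 *		;	(woken early, e.g. by a signal)
 *
 * schedule_timeout_interruptible(HZ) is shorthand for the same pairing.
 */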

struct nsproxy;
struct user_namespace;

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
#define SUID_DUMP_USER		1	/* Dump as user of process */
#define SUID_DUMP_ROOT		2	/* Dump as root */

/* mm flags */

/* for SUID_DUMP_* above */
#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

extern void set_dumpable(struct mm_struct *mm, int value);
/*
 * This returns the actual value of the suid_dumpable flag. For things
 * that are using this for checking for privilege transitions, it must
 * test against SUID_DUMP_USER rather than treating it as a boolean
 * value.
 */
static inline int __get_dumpable(unsigned long mm_flags)
{
	return mm_flags & MMF_DUMPABLE_MASK;
}

static inline int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED	8

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
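
/*
 * Worked example (illustrative only): the coredump filter bits are plain
 * bit positions in mm->flags, so with CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 * the default expands to
 *
 *	(1 << 2) | (1 << 3) | (1 << 6) | (1 << 7) = 0xcc
 *
 * i.e. dump private/shared anonymous memory, ELF headers and private
 * hugetlb mappings, but skip file-backed (MAPPED_*) memory. A reader of
 * /proc/<pid>/coredump_filter sees this shifted down by
 * MMF_DUMP_FILTER_SHIFT, i.e. 0x33 here.
 */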
	/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

struct sighand_struct {
	atomic_t count;
	struct k_sigaction action[_NSIG];
	spinlock_t siglock;
	wait_queue_head_t signalfd_wqh;
};

struct pacct_struct {
	int ac_flag;
	long ac_exitcode;
	unsigned long ac_mem;
	cputime_t ac_utime, ac_stime;
	unsigned long ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

/**
 * struct cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 *
 * Gathers a generic snapshot of user and system time.
 */
struct cputime {
	cputime_t utime;
	cputime_t stime;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This is an extension of struct cputime that includes the total runtime
 * spent by the task from the scheduler point of view.
 *
 * As a result, this structure groups together three kinds of CPU time
 * that are tracked for threads and thread groups. Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME \
	(struct task_cputime) {			\
		.utime = 0,			\
		.stime = 0,			\
		.sum_exec_runtime = 0,		\
	}

#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
#else
#define PREEMPT_DISABLED	PREEMPT_ENABLED
#endif

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:	thread group interval timers.
 * @running:	non-zero when there are timers running and
 *		@cputime receives updates.
 * @lock:	lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	raw_spinlock_t lock;
};

#include <linux/rwsem.h>
struct autogroup;

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t sigcnt;
	atomic_t live;
	int nr_threads;
	struct list_head thread_head;

	wait_queue_head_t wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct *curr_target;

	/* shared signal handling: */
	struct sigpending shared_pending;

	/* thread group exit support */
	int group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int notify_count;
	struct task_struct *group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int group_stop_count;
	unsigned int flags;	/* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int is_child_subreaper:1;
	unsigned int has_child_subreaper:1;

	/* POSIX.1b Interval Timers */
	int posix_timer_id;
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty;	/* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	seqlock_t stats_lock;
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	struct cputime prev_cputime;
#endif
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time of dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	unsigned audit_tty_log_passwd;
	struct tty_audit_buf *tty_audit_buf;
#endif
#ifdef CONFIG_CGROUPS
	/*
	 * group_rwsem prevents new tasks from entering the threadgroup and
	 * member tasks from exiting, more specifically, the setting of
	 * PF_EXITING. fork and exit paths are protected with this rwsem
	 * using threadgroup_change_begin/end(). Users which require
	 * threadgroup to remain stable should use threadgroup_[un]lock()
	 * which also takes care of exec path. Currently, cgroup is the
	 * only user.
	 */
	struct rw_semaphore group_rwsem;
#endif

	oom_flags_t oom_flags;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace) */
};

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return (sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock */
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	kuid_t uid;

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(kuid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;		/* # of times run on this cpu */
	unsigned long long run_delay;	/* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t lock;
	unsigned int flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	u64 blkio_start;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block
				 * io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block
				 * io operations performed */

	u64 freepages_start;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif /* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * Increase resolution of cpu_capacity calculations
 */
#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010	/* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu power */
#define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
#define SD_NUMA			0x4000	/* cross-node balancing */

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed;	/* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long next_decay_max_lb_cost;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}
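
/*
 * For illustration only: sched_domain_span() hands the variable-length
 * span[] back as an ordinary cpumask, so domain-aware code can use the
 * usual cpumask helpers on it. A minimal sketch (assumes sd is valid and
 * appropriately protected by the caller):
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, sched_domain_span(sd))
 *		pr_info("domain %s contains cpu %d\n", sd->name, cpu);
 *
 * (sd->name is only available under CONFIG_SCHED_DEBUG.)
 */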

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP	0x01

struct sd_data {
	struct sched_domain **__percpu sd;
	struct sched_group **__percpu sg;
	struct sched_group_capacity **__percpu sgc;
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int flags;
	int numa_level;
	struct sd_data data;
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
};

extern struct sched_domain_topology_level *sched_domain_topology;

extern void set_sched_topology(struct sched_domain_topology_level *tl);
extern void wake_up_if_idle(int cpu);

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)	.name = #type
#else
# define SD_INIT_NAME(type)
#endif

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif /* !CONFIG_SMP */


struct io_context;	/* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;	/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};

struct sched_avg {
	/*
	 * These sums represent an infinite geometric series and so are bound
	 * above by 1024/(1-y). Thus we only need a u32 to store them for all
	 * choices of y < 1-2^(-32)*1024.
	 */
	u32 runnable_avg_sum, runnable_avg_period;
	u64 last_runnable_update;
	s64 decay_count;
	unsigned long load_avg_contrib;
};

#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64 wait_start;
	u64 wait_max;
	u64 wait_count;
	u64 wait_sum;
	u64 iowait_count;
	u64 iowait_sum;

	u64 sleep_start;
	u64 sleep_max;
	s64 sum_sleep_runtime;

	u64 block_start;
	u64 block_max;
	u64 exec_max;
	u64 slice_max;

	u64 nr_migrations_cold;
	u64 nr_failed_migrations_affine;
	u64 nr_failed_migrations_running;
	u64 nr_failed_migrations_hot;
	u64 nr_forced_migrations;

	u64 nr_wakeups;
	u64 nr_wakeups_sync;
	u64 nr_wakeups_migrate;
	u64 nr_wakeups_local;
	u64 nr_wakeups_remote;
	u64 nr_wakeups_affine;
	u64 nr_wakeups_affine_attempts;
	u64 nr_wakeups_passive;
	u64 nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight load;	/* for load-balancing */
	struct rb_node run_node;
	struct list_head group_node;
	unsigned int on_rq;

	u64 exec_start;
	u64 sum_exec_runtime;
	u64 vruntime;
	u64 prev_sum_exec_runtime;

	u64 nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	int depth;
	struct sched_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq *cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq *my_q;
#endif

#ifdef CONFIG_SMP
	/* Per-entity load-tracking */
	struct sched_avg avg;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq *rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq *my_q;
#endif
};

struct sched_dl_entity {
	struct rb_node rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64 dl_runtime;		/* maximum runtime for each instance */
	u64 dl_deadline;	/* relative deadline of each instance */
	u64 dl_period;		/* separation of two instances (period) */
	u64 dl_bw;		/* dl_runtime / dl_deadline */

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64 runtime;		/* remaining runtime for this instance */
	u64 deadline;		/* absolute deadline for this instance */
	unsigned int flags;	/* specifying the scheduler behaviour */

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_new tells if a new instance arrived. If so we must
	 * start executing it with full runtime and reset its absolute
	 * deadline;
	 *
	 * @dl_boosted tells if we are boosted due to DI (deadline
	 * inheritance). If so we are outside the bandwidth enforcement
	 * mechanism (but only until we exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the cpu before consuming
	 * all its available runtime during the last job.
	 */
	int dl_throttled, dl_new, dl_boosted, dl_yielded;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer dl_timer;
};

union rcu_special {
	struct {
		bool blocked;
		bool need_qs;
	} b;
	short s;
};
struct rcu_node;

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
	struct task_struct *last_wakee;
	unsigned long wakee_flips;
	unsigned long wakee_flip_decay_ts;

	int wake_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	bool rcu_tasks_holdout;
	struct list_head rcu_tasks_holdout_list;
	int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
#endif

	struct mm_struct *mm, *active_mm;
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif
	/* per-thread vma caching */
	u32 vmacache_seqnum;
	struct vm_area_struct *vmacache[VMACACHE_SIZE];
#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat rss_stat;
#endif
/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned int jobctl;	/* JOBCTL_*, siglock protected */

	/* Used for emulating ABI behavior of previous Linux versions */
	unsigned int personality;

	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
				 * execve */
	unsigned in_iowait:1;

	/* Revert to default priority/policy when forking */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;

	unsigned long atomic_flags;	/* Flags needing atomic access. */

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively. (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent;	/* real parent process */
	struct task_struct __rcu *parent;	/* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;	/* for vfork() */
	int __user *set_child_tid;	/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;	/* CLONE_CHILD_CLEARTID */

	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
	unsigned long long cpu_power;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	struct cputime prev_cputime;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqlock_t vtime_seqlock;
	unsigned long long vtime_snap;
	enum {
		VTIME_SLEEPING = 0,
		VTIME_USER,
		VTIME_SYS,
	} vtime_snap_whence;
#endif
	unsigned long nvcsw, nivcsw;	/* context switch counts */
	u64 start_time;		/* monotonic time in nsec */
	u64 real_start_time;	/* boot based time in nsec */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];

/* process credentials */
	const struct cred __rcu *real_cred;	/* objective and real subjective task
						 * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	char comm[TASK_COMM_LEN];	/* executable name excluding path
					 * - access with [gs]et_task_comm (which lock
					 *   it with task_lock())
					 * - initialized normally by setup_new_exec */
/* file system info */
	int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;
	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct rb_root pi_waiters;
	struct rb_node *pi_waiters_leftmost;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
	struct blk_plug *plug;
#endif

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo;	/* For ptrace use. */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	cputime_t acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	seqcount_t mems_allowed_seq;	/* Sequence no. to catch updates */
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short il_next;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	u64 node_stamp;	/* migration stamp */
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	struct list_head numa_entry;
	struct numa_group *numa_group;

	/*
	 * Exponential decaying average of faults on a per-node basis.
	 * Scheduling placement decisions are made based on these counts.
	 * The values remain static for the duration of a PTE scan.
	 */
	unsigned long *numa_faults_memory;
	unsigned long total_numa_faults;

	/*
	 * numa_faults_buffer records faults per node during the current
	 * scan window. When the scan completes, the counts in
	 * numa_faults_memory decay and these values are copied.
	 */
	unsigned long *numa_faults_buffer_memory;

	/*
	 * Track the nodes the process was running on when a NUMA hinting
	 * fault was incurred.
	 */
	unsigned long *numa_faults_cpu;
	unsigned long *numa_faults_buffer_cpu;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local. The task scan period is adapted
	 * based on the locality of the faults with different weights
	 * depending on whether they were shared or private faults.
	 */
	unsigned long numa_faults_locality[2];

	unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	/*
	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for some dirty throttling pause
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	unsigned long dirty_paused_when;	/* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	unsigned long timer_slack_ns;
	unsigned long default_timer_slack_ns;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack *ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask and counter of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
	unsigned int memcg_kmem_skip_account;
	struct memcg_oom_info {
		struct mem_cgroup *memcg;
		gfp_t gfp_mask;
		int order;
		unsigned int may_oom:1;
	} memcg_oom;
#endif
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int sequential_io;
	unsigned int sequential_io_avg;
#endif
};

/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

#define TNF_MIGRATED	0x01
#define TNF_NO_GROUP	0x02
#define TNF_SHARED	0x04
#define TNF_FAULT_LOCAL	0x08

#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p);
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
					int src_nid, int dst_cpu);
#else
static inline void task_numa_fault(int last_node, int node, int pages,
				   int flags)
{
}
static inline pid_t task_numa_group_id(struct task_struct *p)
{
	return 0;
}
static inline void set_numabalancing_state(bool enabled)
{
}
static inline void task_numa_free(struct task_struct *p)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
				struct page *page, int src_nid, int dst_cpu)
{
	return true;
}
#endif

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

struct pid_namespace;

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()    : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()   : virtual id, i.e. the id seen from the pid namespace of
 *                    current.
 * task_xid_nr_ns() : id seen from the ns specified;
 *
 * set_task_vxid()  : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}


static inline int pid_alive(const struct task_struct *p);
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
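
/*
 * For illustration only: the _nr/_vnr/_nr_ns helpers above compose as the
 * naming suggests. For some task 'tsk':
 *
 *	pid_t global = task_pid_nr(tsk);	(id seen from the init ns)
 *	pid_t virt   = task_pid_vnr(tsk);	(id seen from current's ns)
 *
 * The two only differ when current runs inside a non-initial pid
 * namespace; task_pid_nr_ns(tsk, ns) generalises both to an arbitrary ns.
 */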
/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state).
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}
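
/*
 * Example (illustrative): keeping a task pinned across a sleep.
 * Every get_task_struct() must be balanced by a put_task_struct();
 * the final put frees the task via __put_task_struct().
 *
 *	get_task_struct(tsk);
 *	... tsk cannot be freed here, even if it exits ...
 *	put_task_struct(tsk);
 */
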
0x40000000 /* Freezer should not count it as freezable */ 1931#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */ 1932 1933/* 1934 * Only the _current_ task can read/write to tsk->flags, but other 1935 * tasks can access tsk->flags in readonly mode for example 1936 * with tsk_used_math (like during threaded core dumping). 1937 * There is however an exception to this rule during ptrace 1938 * or during fork: the ptracer task is allowed to write to the 1939 * child->flags of its traced child (same goes for fork, the parent 1940 * can write to the child->flags), because we're guaranteed the 1941 * child is not running and in turn not changing child->flags 1942 * at the same time the parent does it. 1943 */ 1944#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) 1945#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) 1946#define clear_used_math() clear_stopped_child_used_math(current) 1947#define set_used_math() set_stopped_child_used_math(current) 1948#define conditional_stopped_child_used_math(condition, child) \ 1949 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) 1950#define conditional_used_math(condition) \ 1951 conditional_stopped_child_used_math(condition, current) 1952#define copy_to_stopped_child_used_math(child) \ 1953 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) 1954/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ 1955#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 1956#define used_math() tsk_used_math(current) 1957 1958/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags 1959 * __GFP_FS is also cleared as it implies __GFP_IO. 1960 */ 1961static inline gfp_t memalloc_noio_flags(gfp_t flags) 1962{ 1963 if (unlikely(current->flags & PF_MEMALLOC_NOIO)) 1964 flags &= ~(__GFP_IO | __GFP_FS); 1965 return flags; 1966} 1967 1968static inline unsigned int memalloc_noio_save(void) 1969{ 1970 unsigned int flags = current->flags & PF_MEMALLOC_NOIO; 1971 current->flags |= PF_MEMALLOC_NOIO; 1972 return flags; 1973} 1974 1975static inline void memalloc_noio_restore(unsigned int flags) 1976{ 1977 current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags; 1978} 1979 1980/* Per-process atomic flags. */ 1981#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. 
*/ 1982#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ 1983#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ 1984 1985 1986#define TASK_PFA_TEST(name, func) \ 1987 static inline bool task_##func(struct task_struct *p) \ 1988 { return test_bit(PFA_##name, &p->atomic_flags); } 1989#define TASK_PFA_SET(name, func) \ 1990 static inline void task_set_##func(struct task_struct *p) \ 1991 { set_bit(PFA_##name, &p->atomic_flags); } 1992#define TASK_PFA_CLEAR(name, func) \ 1993 static inline void task_clear_##func(struct task_struct *p) \ 1994 { clear_bit(PFA_##name, &p->atomic_flags); } 1995 1996TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) 1997TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) 1998 1999TASK_PFA_TEST(SPREAD_PAGE, spread_page) 2000TASK_PFA_SET(SPREAD_PAGE, spread_page) 2001TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) 2002 2003TASK_PFA_TEST(SPREAD_SLAB, spread_slab) 2004TASK_PFA_SET(SPREAD_SLAB, spread_slab) 2005TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) 2006 2007/* 2008 * task->jobctl flags 2009 */ 2010#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ 2011 2012#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ 2013#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ 2014#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ 2015#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ 2016#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ 2017#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ 2018#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ 2019 2020#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT) 2021#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT) 2022#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT) 2023#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT) 2024#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT) 2025#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT) 2026#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT) 2027 2028#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) 2029#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) 2030 2031extern bool task_set_jobctl_pending(struct task_struct *task, 2032 unsigned int mask); 2033extern void task_clear_jobctl_trapping(struct task_struct *task); 2034extern void task_clear_jobctl_pending(struct task_struct *task, 2035 unsigned int mask); 2036 2037static inline void rcu_copy_process(struct task_struct *p) 2038{ 2039#ifdef CONFIG_PREEMPT_RCU 2040 p->rcu_read_lock_nesting = 0; 2041 p->rcu_read_unlock_special.s = 0; 2042 p->rcu_blocked_node = NULL; 2043 INIT_LIST_HEAD(&p->rcu_node_entry); 2044#endif /* #ifdef CONFIG_PREEMPT_RCU */ 2045#ifdef CONFIG_TASKS_RCU 2046 p->rcu_tasks_holdout = false; 2047 INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); 2048 p->rcu_tasks_idle_cpu = -1; 2049#endif /* #ifdef CONFIG_TASKS_RCU */ 2050} 2051 2052static inline void tsk_restore_flags(struct task_struct *task, 2053 unsigned long orig_flags, unsigned long flags) 2054{ 2055 task->flags &= ~flags; 2056 task->flags |= orig_flags & flags; 2057} 2058 2059#ifdef CONFIG_SMP 2060extern void do_set_cpus_allowed(struct task_struct *p, 2061 const struct cpumask *new_mask); 2062 2063extern int set_cpus_allowed_ptr(struct task_struct *p, 2064 const struct cpumask *new_mask); 2065#else 2066static inline void do_set_cpus_allowed(struct task_struct *p, 2067 const struct cpumask *new_mask) 2068{ 2069} 2070static inline int set_cpus_allowed_ptr(struct task_struct *p, 2071 const struct cpumask *new_mask) 
2072{ 2073 if (!cpumask_test_cpu(0, new_mask)) 2074 return -EINVAL; 2075 return 0; 2076} 2077#endif 2078 2079#ifdef CONFIG_NO_HZ_COMMON 2080void calc_load_enter_idle(void); 2081void calc_load_exit_idle(void); 2082#else 2083static inline void calc_load_enter_idle(void) { } 2084static inline void calc_load_exit_idle(void) { } 2085#endif /* CONFIG_NO_HZ_COMMON */ 2086 2087#ifndef CONFIG_CPUMASK_OFFSTACK 2088static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) 2089{ 2090 return set_cpus_allowed_ptr(p, &new_mask); 2091} 2092#endif 2093 2094/* 2095 * Do not use outside of architecture code which knows its limitations. 2096 * 2097 * sched_clock() has no promise of monotonicity or bounded drift between 2098 * CPUs, use (which you should not) requires disabling IRQs. 2099 * 2100 * Please use one of the three interfaces below. 2101 */ 2102extern unsigned long long notrace sched_clock(void); 2103/* 2104 * See the comment in kernel/sched/clock.c 2105 */ 2106extern u64 cpu_clock(int cpu); 2107extern u64 local_clock(void); 2108extern u64 sched_clock_cpu(int cpu); 2109 2110 2111extern void sched_clock_init(void); 2112 2113#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 2114static inline void sched_clock_tick(void) 2115{ 2116} 2117 2118static inline void sched_clock_idle_sleep_event(void) 2119{ 2120} 2121 2122static inline void sched_clock_idle_wakeup_event(u64 delta_ns) 2123{ 2124} 2125#else 2126/* 2127 * Architectures can set this to 1 if they have specified 2128 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, 2129 * but then during bootup it turns out that sched_clock() 2130 * is reliable after all: 2131 */ 2132extern int sched_clock_stable(void); 2133extern void set_sched_clock_stable(void); 2134extern void clear_sched_clock_stable(void); 2135 2136extern void sched_clock_tick(void); 2137extern void sched_clock_idle_sleep_event(void); 2138extern void sched_clock_idle_wakeup_event(u64 delta_ns); 2139#endif 2140 2141#ifdef CONFIG_IRQ_TIME_ACCOUNTING 2142/* 2143 * An i/f to runtime opt-in for irq time accounting based off of sched_clock. 2144 * The reason for this explicit opt-in is not to have perf penalty with 2145 * slow sched_clocks. 
2146 */ 2147extern void enable_sched_clock_irqtime(void); 2148extern void disable_sched_clock_irqtime(void); 2149#else 2150static inline void enable_sched_clock_irqtime(void) {} 2151static inline void disable_sched_clock_irqtime(void) {} 2152#endif 2153 2154extern unsigned long long 2155task_sched_runtime(struct task_struct *task); 2156 2157/* sched_exec is called by processes performing an exec */ 2158#ifdef CONFIG_SMP 2159extern void sched_exec(void); 2160#else 2161#define sched_exec() {} 2162#endif 2163 2164extern void sched_clock_idle_sleep_event(void); 2165extern void sched_clock_idle_wakeup_event(u64 delta_ns); 2166 2167#ifdef CONFIG_HOTPLUG_CPU 2168extern void idle_task_exit(void); 2169#else 2170static inline void idle_task_exit(void) {} 2171#endif 2172 2173#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) 2174extern void wake_up_nohz_cpu(int cpu); 2175#else 2176static inline void wake_up_nohz_cpu(int cpu) { } 2177#endif 2178 2179#ifdef CONFIG_NO_HZ_FULL 2180extern bool sched_can_stop_tick(void); 2181extern u64 scheduler_tick_max_deferment(void); 2182#else 2183static inline bool sched_can_stop_tick(void) { return false; } 2184#endif 2185 2186#ifdef CONFIG_SCHED_AUTOGROUP 2187extern void sched_autogroup_create_attach(struct task_struct *p); 2188extern void sched_autogroup_detach(struct task_struct *p); 2189extern void sched_autogroup_fork(struct signal_struct *sig); 2190extern void sched_autogroup_exit(struct signal_struct *sig); 2191#ifdef CONFIG_PROC_FS 2192extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); 2193extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); 2194#endif 2195#else 2196static inline void sched_autogroup_create_attach(struct task_struct *p) { } 2197static inline void sched_autogroup_detach(struct task_struct *p) { } 2198static inline void sched_autogroup_fork(struct signal_struct *sig) { } 2199static inline void sched_autogroup_exit(struct signal_struct *sig) { } 2200#endif 2201 2202extern int yield_to(struct task_struct *p, bool preempt); 2203extern void set_user_nice(struct task_struct *p, long nice); 2204extern int task_prio(const struct task_struct *p); 2205/** 2206 * task_nice - return the nice value of a given task. 2207 * @p: the task in question. 2208 * 2209 * Return: The nice value [ -20 ... 0 ... 19 ]. 2210 */ 2211static inline int task_nice(const struct task_struct *p) 2212{ 2213 return PRIO_TO_NICE((p)->static_prio); 2214} 2215extern int can_nice(const struct task_struct *p, const int nice); 2216extern int task_curr(const struct task_struct *p); 2217extern int idle_cpu(int cpu); 2218extern int sched_setscheduler(struct task_struct *, int, 2219 const struct sched_param *); 2220extern int sched_setscheduler_nocheck(struct task_struct *, int, 2221 const struct sched_param *); 2222extern int sched_setattr(struct task_struct *, 2223 const struct sched_attr *); 2224extern struct task_struct *idle_task(int cpu); 2225/** 2226 * is_idle_task - is the specified task an idle task? 2227 * @p: the task in question. 2228 * 2229 * Return: 1 if @p is an idle task. 0 otherwise. 2230 */ 2231static inline bool is_idle_task(const struct task_struct *p) 2232{ 2233 return p->pid == 0; 2234} 2235extern struct task_struct *curr_task(int cpu); 2236extern void set_curr_task(int cpu, struct task_struct *p); 2237 2238void yield(void); 2239 2240/* 2241 * The default (Linux) execution domain. 
2242 */ 2243extern struct exec_domain default_exec_domain; 2244 2245union thread_union { 2246 struct thread_info thread_info; 2247 unsigned long stack[THREAD_SIZE/sizeof(long)]; 2248}; 2249 2250#ifndef __HAVE_ARCH_KSTACK_END 2251static inline int kstack_end(void *addr) 2252{ 2253 /* Reliable end of stack detection: 2254 * Some APM bios versions misalign the stack 2255 */ 2256 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); 2257} 2258#endif 2259 2260extern union thread_union init_thread_union; 2261extern struct task_struct init_task; 2262 2263extern struct mm_struct init_mm; 2264 2265extern struct pid_namespace init_pid_ns; 2266 2267/* 2268 * find a task by one of its numerical ids 2269 * 2270 * find_task_by_pid_ns(): 2271 * finds a task by its pid in the specified namespace 2272 * find_task_by_vpid(): 2273 * finds a task by its virtual pid 2274 * 2275 * see also find_vpid() etc in include/linux/pid.h 2276 */ 2277 2278extern struct task_struct *find_task_by_vpid(pid_t nr); 2279extern struct task_struct *find_task_by_pid_ns(pid_t nr, 2280 struct pid_namespace *ns); 2281 2282/* per-UID process charging. */ 2283extern struct user_struct * alloc_uid(kuid_t); 2284static inline struct user_struct *get_uid(struct user_struct *u) 2285{ 2286 atomic_inc(&u->__count); 2287 return u; 2288} 2289extern void free_uid(struct user_struct *); 2290 2291#include <asm/current.h> 2292 2293extern void xtime_update(unsigned long ticks); 2294 2295extern int wake_up_state(struct task_struct *tsk, unsigned int state); 2296extern int wake_up_process(struct task_struct *tsk); 2297extern void wake_up_new_task(struct task_struct *tsk); 2298#ifdef CONFIG_SMP 2299 extern void kick_process(struct task_struct *tsk); 2300#else 2301 static inline void kick_process(struct task_struct *tsk) { } 2302#endif 2303extern int sched_fork(unsigned long clone_flags, struct task_struct *p); 2304extern void sched_dead(struct task_struct *p); 2305 2306extern void proc_caches_init(void); 2307extern void flush_signals(struct task_struct *); 2308extern void __flush_signals(struct task_struct *); 2309extern void ignore_signals(struct task_struct *); 2310extern void flush_signal_handlers(struct task_struct *, int force_default); 2311extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); 2312 2313static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) 2314{ 2315 unsigned long flags; 2316 int ret; 2317 2318 spin_lock_irqsave(&tsk->sighand->siglock, flags); 2319 ret = dequeue_signal(tsk, mask, info); 2320 spin_unlock_irqrestore(&tsk->sighand->siglock, flags); 2321 2322 return ret; 2323} 2324 2325extern void block_all_signals(int (*notifier)(void *priv), void *priv, 2326 sigset_t *mask); 2327extern void unblock_all_signals(void); 2328extern void release_task(struct task_struct * p); 2329extern int send_sig_info(int, struct siginfo *, struct task_struct *); 2330extern int force_sigsegv(int, struct task_struct *); 2331extern int force_sig_info(int, struct siginfo *, struct task_struct *); 2332extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); 2333extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); 2334extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, 2335 const struct cred *, u32); 2336extern int kill_pgrp(struct pid *pid, int sig, int priv); 2337extern int kill_pid(struct pid *pid, int sig, int priv); 2338extern int kill_proc_info(int, struct siginfo *, pid_t); 2339extern 
__must_check bool do_notify_parent(struct task_struct *, int); 2340extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); 2341extern void force_sig(int, struct task_struct *); 2342extern int send_sig(int, struct task_struct *, int); 2343extern int zap_other_threads(struct task_struct *p); 2344extern struct sigqueue *sigqueue_alloc(void); 2345extern void sigqueue_free(struct sigqueue *); 2346extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); 2347extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); 2348 2349static inline void restore_saved_sigmask(void) 2350{ 2351 if (test_and_clear_restore_sigmask()) 2352 __set_current_blocked(¤t->saved_sigmask); 2353} 2354 2355static inline sigset_t *sigmask_to_save(void) 2356{ 2357 sigset_t *res = ¤t->blocked; 2358 if (unlikely(test_restore_sigmask())) 2359 res = ¤t->saved_sigmask; 2360 return res; 2361} 2362 2363static inline int kill_cad_pid(int sig, int priv) 2364{ 2365 return kill_pid(cad_pid, sig, priv); 2366} 2367 2368/* These can be the second arg to send_sig_info/send_group_sig_info. */ 2369#define SEND_SIG_NOINFO ((struct siginfo *) 0) 2370#define SEND_SIG_PRIV ((struct siginfo *) 1) 2371#define SEND_SIG_FORCED ((struct siginfo *) 2) 2372 2373/* 2374 * True if we are on the alternate signal stack. 2375 */ 2376static inline int on_sig_stack(unsigned long sp) 2377{ 2378#ifdef CONFIG_STACK_GROWSUP 2379 return sp >= current->sas_ss_sp && 2380 sp - current->sas_ss_sp < current->sas_ss_size; 2381#else 2382 return sp > current->sas_ss_sp && 2383 sp - current->sas_ss_sp <= current->sas_ss_size; 2384#endif 2385} 2386 2387static inline int sas_ss_flags(unsigned long sp) 2388{ 2389 if (!current->sas_ss_size) 2390 return SS_DISABLE; 2391 2392 return on_sig_stack(sp) ? SS_ONSTACK : 0; 2393} 2394 2395static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) 2396{ 2397 if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp)) 2398#ifdef CONFIG_STACK_GROWSUP 2399 return current->sas_ss_sp; 2400#else 2401 return current->sas_ss_sp + current->sas_ss_size; 2402#endif 2403 return sp; 2404} 2405 2406/* 2407 * Routines for handling mm_structs 2408 */ 2409extern struct mm_struct * mm_alloc(void); 2410 2411/* mmdrop drops the mm and the page tables */ 2412extern void __mmdrop(struct mm_struct *); 2413static inline void mmdrop(struct mm_struct * mm) 2414{ 2415 if (unlikely(atomic_dec_and_test(&mm->mm_count))) 2416 __mmdrop(mm); 2417} 2418 2419/* mmput gets rid of the mappings and all user-space */ 2420extern void mmput(struct mm_struct *); 2421/* Grab a reference to a task's mm, if it is not already going away */ 2422extern struct mm_struct *get_task_mm(struct task_struct *task); 2423/* 2424 * Grab a reference to a task's mm, if it is not already going away 2425 * and ptrace_may_access with the mode parameter passed to it 2426 * succeeds. 
2427 */ 2428extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); 2429/* Remove the current tasks stale references to the old mm_struct */ 2430extern void mm_release(struct task_struct *, struct mm_struct *); 2431 2432extern int copy_thread(unsigned long, unsigned long, unsigned long, 2433 struct task_struct *); 2434extern void flush_thread(void); 2435extern void exit_thread(void); 2436 2437extern void exit_files(struct task_struct *); 2438extern void __cleanup_sighand(struct sighand_struct *); 2439 2440extern void exit_itimers(struct signal_struct *); 2441extern void flush_itimer_signals(void); 2442 2443extern void do_group_exit(int); 2444 2445extern int do_execve(struct filename *, 2446 const char __user * const __user *, 2447 const char __user * const __user *); 2448extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); 2449struct task_struct *fork_idle(int); 2450extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 2451 2452extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); 2453static inline void set_task_comm(struct task_struct *tsk, const char *from) 2454{ 2455 __set_task_comm(tsk, from, false); 2456} 2457extern char *get_task_comm(char *to, struct task_struct *tsk); 2458 2459#ifdef CONFIG_SMP 2460void scheduler_ipi(void); 2461extern unsigned long wait_task_inactive(struct task_struct *, long match_state); 2462#else 2463static inline void scheduler_ipi(void) { } 2464static inline unsigned long wait_task_inactive(struct task_struct *p, 2465 long match_state) 2466{ 2467 return 1; 2468} 2469#endif 2470 2471#define next_task(p) \ 2472 list_entry_rcu((p)->tasks.next, struct task_struct, tasks) 2473 2474#define for_each_process(p) \ 2475 for (p = &init_task ; (p = next_task(p)) != &init_task ; ) 2476 2477extern bool current_is_single_threaded(void); 2478 2479/* 2480 * Careful: do_each_thread/while_each_thread is a double loop so 2481 * 'break' will not work as expected - use goto instead. 2482 */ 2483#define do_each_thread(g, t) \ 2484 for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do 2485 2486#define while_each_thread(g, t) \ 2487 while ((t = next_thread(t)) != g) 2488 2489#define __for_each_thread(signal, t) \ 2490 list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node) 2491 2492#define for_each_thread(p, t) \ 2493 __for_each_thread((p)->signal, t) 2494 2495/* Careful: this is a double loop, 'break' won't work as expected. */ 2496#define for_each_process_thread(p, t) \ 2497 for_each_process(p) for_each_thread(p, t) 2498 2499static inline int get_nr_threads(struct task_struct *tsk) 2500{ 2501 return tsk->signal->nr_threads; 2502} 2503 2504static inline bool thread_group_leader(struct task_struct *p) 2505{ 2506 return p->exit_signal >= 0; 2507} 2508 2509/* Do to the insanities of de_thread it is possible for a process 2510 * to have the pid of the thread group leader without actually being 2511 * the thread group leader. For iteration through the pids in proc 2512 * all we care about is that we have a task with the appropriate 2513 * pid, we don't actually care if we have the right task. 
2514 */ 2515static inline bool has_group_leader_pid(struct task_struct *p) 2516{ 2517 return task_pid(p) == p->signal->leader_pid; 2518} 2519 2520static inline 2521bool same_thread_group(struct task_struct *p1, struct task_struct *p2) 2522{ 2523 return p1->signal == p2->signal; 2524} 2525 2526static inline struct task_struct *next_thread(const struct task_struct *p) 2527{ 2528 return list_entry_rcu(p->thread_group.next, 2529 struct task_struct, thread_group); 2530} 2531 2532static inline int thread_group_empty(struct task_struct *p) 2533{ 2534 return list_empty(&p->thread_group); 2535} 2536 2537#define delay_group_leader(p) \ 2538 (thread_group_leader(p) && !thread_group_empty(p)) 2539 2540/* 2541 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring 2542 * subscriptions and synchronises with wait4(). Also used in procfs. Also 2543 * pins the final release of task.io_context. Also protects ->cpuset and 2544 * ->cgroup.subsys[]. And ->vfork_done. 2545 * 2546 * Nests both inside and outside of read_lock(&tasklist_lock). 2547 * It must not be nested with write_lock_irq(&tasklist_lock), 2548 * neither inside nor outside. 2549 */ 2550static inline void task_lock(struct task_struct *p) 2551{ 2552 spin_lock(&p->alloc_lock); 2553} 2554 2555static inline void task_unlock(struct task_struct *p) 2556{ 2557 spin_unlock(&p->alloc_lock); 2558} 2559 2560extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, 2561 unsigned long *flags); 2562 2563static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk, 2564 unsigned long *flags) 2565{ 2566 struct sighand_struct *ret; 2567 2568 ret = __lock_task_sighand(tsk, flags); 2569 (void)__cond_lock(&tsk->sighand->siglock, ret); 2570 return ret; 2571} 2572 2573static inline void unlock_task_sighand(struct task_struct *tsk, 2574 unsigned long *flags) 2575{ 2576 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); 2577} 2578 2579#ifdef CONFIG_CGROUPS 2580static inline void threadgroup_change_begin(struct task_struct *tsk) 2581{ 2582 down_read(&tsk->signal->group_rwsem); 2583} 2584static inline void threadgroup_change_end(struct task_struct *tsk) 2585{ 2586 up_read(&tsk->signal->group_rwsem); 2587} 2588 2589/** 2590 * threadgroup_lock - lock threadgroup 2591 * @tsk: member task of the threadgroup to lock 2592 * 2593 * Lock the threadgroup @tsk belongs to. No new task is allowed to enter 2594 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or 2595 * change ->group_leader/pid. This is useful for cases where the threadgroup 2596 * needs to stay stable across blockable operations. 2597 * 2598 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for 2599 * synchronization. While held, no new task will be added to threadgroup 2600 * and no existing live task will have its PF_EXITING set. 2601 * 2602 * de_thread() does threadgroup_change_{begin|end}() when a non-leader 2603 * sub-thread becomes a new leader. 2604 */ 2605static inline void threadgroup_lock(struct task_struct *tsk) 2606{ 2607 down_write(&tsk->signal->group_rwsem); 2608} 2609 2610/** 2611 * threadgroup_unlock - unlock threadgroup 2612 * @tsk: member task of the threadgroup to unlock 2613 * 2614 * Reverse threadgroup_lock(). 
2615 */ 2616static inline void threadgroup_unlock(struct task_struct *tsk) 2617{ 2618 up_write(&tsk->signal->group_rwsem); 2619} 2620#else 2621static inline void threadgroup_change_begin(struct task_struct *tsk) {} 2622static inline void threadgroup_change_end(struct task_struct *tsk) {} 2623static inline void threadgroup_lock(struct task_struct *tsk) {} 2624static inline void threadgroup_unlock(struct task_struct *tsk) {} 2625#endif 2626 2627#ifndef __HAVE_THREAD_FUNCTIONS 2628 2629#define task_thread_info(task) ((struct thread_info *)(task)->stack) 2630#define task_stack_page(task) ((task)->stack) 2631 2632static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) 2633{ 2634 *task_thread_info(p) = *task_thread_info(org); 2635 task_thread_info(p)->task = p; 2636} 2637 2638/* 2639 * Return the address of the last usable long on the stack. 2640 * 2641 * When the stack grows down, this is just above the thread 2642 * info struct. Going any lower will corrupt the threadinfo. 2643 * 2644 * When the stack grows up, this is the highest address. 2645 * Beyond that position, we corrupt data on the next page. 2646 */ 2647static inline unsigned long *end_of_stack(struct task_struct *p) 2648{ 2649#ifdef CONFIG_STACK_GROWSUP 2650 return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; 2651#else 2652 return (unsigned long *)(task_thread_info(p) + 1); 2653#endif 2654} 2655 2656#endif 2657#define task_stack_end_corrupted(task) \ 2658 (*(end_of_stack(task)) != STACK_END_MAGIC) 2659 2660static inline int object_is_on_stack(void *obj) 2661{ 2662 void *stack = task_stack_page(current); 2663 2664 return (obj >= stack) && (obj < (stack + THREAD_SIZE)); 2665} 2666 2667extern void thread_info_cache_init(void); 2668 2669#ifdef CONFIG_DEBUG_STACK_USAGE 2670static inline unsigned long stack_not_used(struct task_struct *p) 2671{ 2672 unsigned long *n = end_of_stack(p); 2673 2674 do { /* Skip over canary */ 2675 n++; 2676 } while (!*n); 2677 2678 return (unsigned long)n - (unsigned long)end_of_stack(p); 2679} 2680#endif 2681extern void set_task_stack_end_magic(struct task_struct *tsk); 2682 2683/* set thread flags in other task's structures 2684 * - see asm/thread_info.h for TIF_xxxx flags available 2685 */ 2686static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) 2687{ 2688 set_ti_thread_flag(task_thread_info(tsk), flag); 2689} 2690 2691static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) 2692{ 2693 clear_ti_thread_flag(task_thread_info(tsk), flag); 2694} 2695 2696static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) 2697{ 2698 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); 2699} 2700 2701static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) 2702{ 2703 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); 2704} 2705 2706static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) 2707{ 2708 return test_ti_thread_flag(task_thread_info(tsk), flag); 2709} 2710 2711static inline void set_tsk_need_resched(struct task_struct *tsk) 2712{ 2713 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 2714} 2715 2716static inline void clear_tsk_need_resched(struct task_struct *tsk) 2717{ 2718 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 2719} 2720 2721static inline int test_tsk_need_resched(struct task_struct *tsk) 2722{ 2723 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); 2724} 2725 2726static inline int restart_syscall(void) 2727{ 
2728 set_tsk_thread_flag(current, TIF_SIGPENDING); 2729 return -ERESTARTNOINTR; 2730} 2731 2732static inline int signal_pending(struct task_struct *p) 2733{ 2734 return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); 2735} 2736 2737static inline int __fatal_signal_pending(struct task_struct *p) 2738{ 2739 return unlikely(sigismember(&p->pending.signal, SIGKILL)); 2740} 2741 2742static inline int fatal_signal_pending(struct task_struct *p) 2743{ 2744 return signal_pending(p) && __fatal_signal_pending(p); 2745} 2746 2747static inline int signal_pending_state(long state, struct task_struct *p) 2748{ 2749 if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) 2750 return 0; 2751 if (!signal_pending(p)) 2752 return 0; 2753 2754 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); 2755} 2756 2757/* 2758 * cond_resched() and cond_resched_lock(): latency reduction via 2759 * explicit rescheduling in places that are safe. The return 2760 * value indicates whether a reschedule was done in fact. 2761 * cond_resched_lock() will drop the spinlock before scheduling, 2762 * cond_resched_softirq() will enable bhs before scheduling. 2763 */ 2764extern int _cond_resched(void); 2765 2766#define cond_resched() ({ \ 2767 __might_sleep(__FILE__, __LINE__, 0); \ 2768 _cond_resched(); \ 2769}) 2770 2771extern int __cond_resched_lock(spinlock_t *lock); 2772 2773#ifdef CONFIG_PREEMPT_COUNT 2774#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET 2775#else 2776#define PREEMPT_LOCK_OFFSET 0 2777#endif 2778 2779#define cond_resched_lock(lock) ({ \ 2780 __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ 2781 __cond_resched_lock(lock); \ 2782}) 2783 2784extern int __cond_resched_softirq(void); 2785 2786#define cond_resched_softirq() ({ \ 2787 __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ 2788 __cond_resched_softirq(); \ 2789}) 2790 2791static inline void cond_resched_rcu(void) 2792{ 2793#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) 2794 rcu_read_unlock(); 2795 cond_resched(); 2796 rcu_read_lock(); 2797#endif 2798} 2799 2800/* 2801 * Does a critical section need to be broken due to another 2802 * task waiting?: (technically does not depend on CONFIG_PREEMPT, 2803 * but a general need for low latency) 2804 */ 2805static inline int spin_needbreak(spinlock_t *lock) 2806{ 2807#ifdef CONFIG_PREEMPT 2808 return spin_is_contended(lock); 2809#else 2810 return 0; 2811#endif 2812} 2813 2814/* 2815 * Idle thread specific functions to determine the need_resched 2816 * polling state. 
2817 */ 2818#ifdef TIF_POLLING_NRFLAG 2819static inline int tsk_is_polling(struct task_struct *p) 2820{ 2821 return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); 2822} 2823 2824static inline void __current_set_polling(void) 2825{ 2826 set_thread_flag(TIF_POLLING_NRFLAG); 2827} 2828 2829static inline bool __must_check current_set_polling_and_test(void) 2830{ 2831 __current_set_polling(); 2832 2833 /* 2834 * Polling state must be visible before we test NEED_RESCHED, 2835 * paired by resched_curr() 2836 */ 2837 smp_mb__after_atomic(); 2838 2839 return unlikely(tif_need_resched()); 2840} 2841 2842static inline void __current_clr_polling(void) 2843{ 2844 clear_thread_flag(TIF_POLLING_NRFLAG); 2845} 2846 2847static inline bool __must_check current_clr_polling_and_test(void) 2848{ 2849 __current_clr_polling(); 2850 2851 /* 2852 * Polling state must be visible before we test NEED_RESCHED, 2853 * paired by resched_curr() 2854 */ 2855 smp_mb__after_atomic(); 2856 2857 return unlikely(tif_need_resched()); 2858} 2859 2860#else 2861static inline int tsk_is_polling(struct task_struct *p) { return 0; } 2862static inline void __current_set_polling(void) { } 2863static inline void __current_clr_polling(void) { } 2864 2865static inline bool __must_check current_set_polling_and_test(void) 2866{ 2867 return unlikely(tif_need_resched()); 2868} 2869static inline bool __must_check current_clr_polling_and_test(void) 2870{ 2871 return unlikely(tif_need_resched()); 2872} 2873#endif 2874 2875static inline void current_clr_polling(void) 2876{ 2877 __current_clr_polling(); 2878 2879 /* 2880 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit. 2881 * Once the bit is cleared, we'll get IPIs with every new 2882 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also 2883 * fold. 2884 */ 2885 smp_mb(); /* paired with resched_curr() */ 2886 2887 preempt_fold_need_resched(); 2888} 2889 2890static __always_inline bool need_resched(void) 2891{ 2892 return unlikely(tif_need_resched()); 2893} 2894 2895/* 2896 * Thread group CPU time accounting. 2897 */ 2898void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); 2899void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); 2900 2901static inline void thread_group_cputime_init(struct signal_struct *sig) 2902{ 2903 raw_spin_lock_init(&sig->cputimer.lock); 2904} 2905 2906/* 2907 * Reevaluate whether the task has signals pending delivery. 2908 * Wake the task if so. 2909 * This is required every time the blocked sigset_t changes. 2910 * callers must hold sighand->siglock. 2911 */ 2912extern void recalc_sigpending_and_wake(struct task_struct *t); 2913extern void recalc_sigpending(void); 2914 2915extern void signal_wake_up_state(struct task_struct *t, unsigned int state); 2916 2917static inline void signal_wake_up(struct task_struct *t, bool resume) 2918{ 2919 signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); 2920} 2921static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) 2922{ 2923 signal_wake_up_state(t, resume ? __TASK_TRACED : 0); 2924} 2925 2926/* 2927 * Wrappers for p->thread_info->cpu access. No-op on UP. 
2928 */ 2929#ifdef CONFIG_SMP 2930 2931static inline unsigned int task_cpu(const struct task_struct *p) 2932{ 2933 return task_thread_info(p)->cpu; 2934} 2935 2936static inline int task_node(const struct task_struct *p) 2937{ 2938 return cpu_to_node(task_cpu(p)); 2939} 2940 2941extern void set_task_cpu(struct task_struct *p, unsigned int cpu); 2942 2943#else 2944 2945static inline unsigned int task_cpu(const struct task_struct *p) 2946{ 2947 return 0; 2948} 2949 2950static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) 2951{ 2952} 2953 2954#endif /* CONFIG_SMP */ 2955 2956extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); 2957extern long sched_getaffinity(pid_t pid, struct cpumask *mask); 2958 2959#ifdef CONFIG_CGROUP_SCHED 2960extern struct task_group root_task_group; 2961#endif /* CONFIG_CGROUP_SCHED */ 2962 2963extern int task_can_switch_user(struct user_struct *up, 2964 struct task_struct *tsk); 2965 2966#ifdef CONFIG_TASK_XACCT 2967static inline void add_rchar(struct task_struct *tsk, ssize_t amt) 2968{ 2969 tsk->ioac.rchar += amt; 2970} 2971 2972static inline void add_wchar(struct task_struct *tsk, ssize_t amt) 2973{ 2974 tsk->ioac.wchar += amt; 2975} 2976 2977static inline void inc_syscr(struct task_struct *tsk) 2978{ 2979 tsk->ioac.syscr++; 2980} 2981 2982static inline void inc_syscw(struct task_struct *tsk) 2983{ 2984 tsk->ioac.syscw++; 2985} 2986#else 2987static inline void add_rchar(struct task_struct *tsk, ssize_t amt) 2988{ 2989} 2990 2991static inline void add_wchar(struct task_struct *tsk, ssize_t amt) 2992{ 2993} 2994 2995static inline void inc_syscr(struct task_struct *tsk) 2996{ 2997} 2998 2999static inline void inc_syscw(struct task_struct *tsk) 3000{ 3001} 3002#endif 3003 3004#ifndef TASK_SIZE_OF 3005#define TASK_SIZE_OF(tsk) TASK_SIZE 3006#endif 3007 3008#ifdef CONFIG_MEMCG 3009extern void mm_update_next_owner(struct mm_struct *mm); 3010#else 3011static inline void mm_update_next_owner(struct mm_struct *mm) 3012{ 3013} 3014#endif /* CONFIG_MEMCG */ 3015 3016static inline unsigned long task_rlimit(const struct task_struct *tsk, 3017 unsigned int limit) 3018{ 3019 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur); 3020} 3021 3022static inline unsigned long task_rlimit_max(const struct task_struct *tsk, 3023 unsigned int limit) 3024{ 3025 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max); 3026} 3027 3028static inline unsigned long rlimit(unsigned int limit) 3029{ 3030 return task_rlimit(current, limit); 3031} 3032 3033static inline unsigned long rlimit_max(unsigned int limit) 3034{ 3035 return task_rlimit_max(current, limit); 3036} 3037 3038#endif 3039