sched.h revision 83e1d2cd9eabec5164afea295ff06b941ae8e4a9
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>


struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
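/*
 * A minimal illustrative sketch, not part of the original header: one decay
 * step of the fixed-point average as the load-average code applies it.  The
 * function name is hypothetical, and "active" is assumed to already be the
 * runnable+uninterruptible task count scaled by FIXED_1.
 */
static inline unsigned long calc_load_example(unsigned long load,
					      unsigned long active)
{
	/* load = load*EXP_1/FIXED_1 + active*(FIXED_1 - EXP_1)/FIXED_1 */
	CALC_LOAD(load, EXP_1, active);
	return load;
}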

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);


extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);

extern unsigned long get_parent_ip(unsigned long addr);

extern void dump_cpu_task(int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_STATE_MAX		1024

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"

extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_dead(task)	((task)->exit_state != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0)

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	set_mb(current->state, (state_value))

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

struct nsproxy;
struct user_namespace;

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif


extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE      0	/* core dump is permitted */
#define MMF_DUMP_SECURELY 1	/* core file is readable only by root */

#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
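/*
 * A minimal illustrative sketch, not part of the original header: testing one
 * of the coredump filter bits kept in mm->flags.  The helper name is
 * hypothetical; the real coredump code open-codes equivalent checks.
 */
static inline int example_dumps_anon_private(struct mm_struct *mm)
{
	return test_bit(MMF_DUMP_ANON_PRIVATE, &mm->flags);
}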

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

/**
 * struct cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 *
 * Gathers a generic snapshot of user and system time.
 */
struct cputime {
	cputime_t utime;
	cputime_t stime;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This is an extension of struct cputime that includes the total runtime
 * spent by the task from the scheduler point of view.
 *
 * As a result, this structure groups together three kinds of CPU time
 * that are tracked for threads and thread groups.  Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {				\
		.utime = 0,				\
		.stime = 0,				\
		.sum_exec_runtime = 0,			\
	}

#define PREEMPT_ENABLED	(PREEMPT_NEED_RESCHED)

#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
#else
#define PREEMPT_DISABLED	PREEMPT_ENABLED
#endif

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 *			@cputime receives updates.
 * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	raw_spinlock_t lock;
};

#include <linux/rwsem.h>
struct autogroup;

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		sigcnt;
	atomic_t		live;
	int			nr_threads;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

	/* POSIX.1b Interval Timers */
	int			posix_timer_id;
	struct list_head	posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	struct cputime prev_cputime;
#endif
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of schedule CPU time for dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	unsigned audit_tty_log_passwd;
	struct tty_audit_buf *tty_audit_buf;
#endif
#ifdef CONFIG_CGROUPS
	/*
	 * group_rwsem prevents new tasks from entering the threadgroup and
	 * member tasks from exiting, more specifically, setting of
	 * PF_EXITING.  fork and exit paths are protected with this rwsem
	 * using threadgroup_change_begin/end().  Users which require
	 * threadgroup to remain stable should use threadgroup_[un]lock()
	 * which also takes care of exec path.  Currently, cgroup is the
	 * only user.
	 */
	struct rw_semaphore group_rwsem;
#endif

	oom_flags_t oom_flags;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace) */
};

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	kuid_t uid;

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(kuid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;		/* # of times run on this cpu */
	unsigned long long run_delay;	/* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	struct timespec freepages_start, freepages_end;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * Increase resolution of cpu_power calculations
 */
#define SCHED_POWER_SHIFT	10
#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)
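/*
 * A minimal illustrative sketch, not part of the original header: cpu_power is
 * a fixed-point capacity in which SCHED_POWER_SCALE stands for one fully
 * available CPU, so a load figure is typically normalised by multiplying with
 * the scale and dividing by the power in question.  The helper name is
 * hypothetical.
 */
static inline unsigned long example_scale_load_by_power(unsigned long load,
							unsigned long power)
{
	return (load * SCHED_POWER_SCALE) / power;
}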

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010	/* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
#define SD_NUMA			0x4000	/* cross-node balancing */

extern int __weak arch_sd_sibiling_asym_packing(void);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	u64 last_update;

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long next_decay_max_lb_cost;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

bool cpus_share_cache(int this_cpu, int that_cpu);
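/*
 * A minimal illustrative sketch, not part of the original header: the span is
 * a variable-length cpumask tacked onto the end of the structure, so it is
 * only ever reached through sched_domain_span().  The function name below is
 * hypothetical.
 */
static inline unsigned int example_domain_weight(struct sched_domain *sd)
{
	unsigned int cpu, n = 0;

	for_each_cpu(cpu, sched_domain_span(sd))
		n++;
	return n;		/* matches sd->span_weight */
}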

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */


struct io_context;			/* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct load_weight {
	unsigned long weight, inv_weight;
};

struct sched_avg {
	/*
	 * These sums represent an infinite geometric series and so are bound
	 * above by 1024/(1-y).  Thus we only need a u32 to store them for all
	 * choices of y < 1-2^(-32)*1024.
	 */
	u32 runnable_avg_sum, runnable_avg_period;
	u64 last_runnable_update;
	s64 decay_count;
	unsigned long load_avg_contrib;
};

#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64 wait_start;
	u64 wait_max;
	u64 wait_count;
	u64 wait_sum;
	u64 iowait_count;
	u64 iowait_sum;

	u64 sleep_start;
	u64 sleep_max;
	s64 sum_sleep_runtime;

	u64 block_start;
	u64 block_max;
	u64 exec_max;
	u64 slice_max;

	u64 nr_migrations_cold;
	u64 nr_failed_migrations_affine;
	u64 nr_failed_migrations_running;
	u64 nr_failed_migrations_hot;
	u64 nr_forced_migrations;

	u64 nr_wakeups;
	u64 nr_wakeups_sync;
	u64 nr_wakeups_migrate;
	u64 nr_wakeups_local;
	u64 nr_wakeups_remote;
	u64 nr_wakeups_affine;
	u64 nr_wakeups_affine_attempts;
	u64 nr_wakeups_passive;
	u64 nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif

#ifdef CONFIG_SMP
	/* Per-entity load-tracking */
	struct sched_avg	avg;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};


struct rcu_node;

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
	struct task_struct *last_wakee;
	unsigned long wakee_flips;
	unsigned long wakee_flip_decay_ts;

	int wake_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

	/*
	 * fpu_counter contains the number of consecutive context switches
	 * that the FPU is used. If this is over a threshold, the lazy fpu
	 * saving becomes unlazy to save the trap. This is an unsigned char
	 * so that after 256 times the counter wraps and the behavior turns
	 * lazy again; this to deal with bursty apps that only use FPU for
	 * a short time
	 */
	unsigned char fpu_counter;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	char rcu_read_unlock_special;
	struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rcu_boost_mutex;
#endif /* #ifdef CONFIG_RCU_BOOST */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
#endif

	struct mm_struct *mm, *active_mm;
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif
#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat	rss_stat;
#endif
/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned int jobctl;	/* JOBCTL_*, siglock protected */

	/* Used for emulating ABI behavior of previous Linux versions */
	unsigned int personality;

	unsigned did_exec:1;
	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
				 * execve */
	unsigned in_iowait:1;

	/* task may not gain privileges */
	unsigned no_new_privs:1;

	/* Revert to default priority/policy when forking */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	struct cputime prev_cputime;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqlock_t vtime_seqlock;
	unsigned long long vtime_snap;
	enum {
		VTIME_SLEEPING = 0,
		VTIME_USER,
		VTIME_SYS,
	} vtime_snap_whence;
#endif
	unsigned long nvcsw, nivcsw;		/* context switch counts */
	struct timespec start_time;		/* monotonic time */
	struct timespec real_start_time;	/* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];

/* process credentials */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					 * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by setup_new_exec */
/* file system info */
	int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;
	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct plist_head pi_waiters;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
	struct blk_plug *plug;
#endif

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo;	/* For ptrace use. */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	cputime_t acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	seqcount_t mems_allowed_seq;	/* Sequence no to catch updates */
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short il_next;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	int numa_migrate_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	unsigned long numa_migrate_retry;
	u64 node_stamp;			/* migration stamp */
	struct callback_head numa_work;

	struct list_head numa_entry;
	struct numa_group *numa_group;

	/*
	 * Exponential decaying average of faults on a per-node basis.
	 * Scheduling placement decisions are made based on these counts.
	 * The values remain static for the duration of a PTE scan
	 */
	unsigned long *numa_faults;
	unsigned long total_numa_faults;

	/*
	 * numa_faults_buffer records faults per node during the current
	 * scan window. When the scan completes, the counts in numa_faults
	 * decay and these values are copied.
	 */
	unsigned long *numa_faults_buffer;

	int numa_preferred_nid;
#endif /* CONFIG_NUMA_BALANCING */

	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef	CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	/*
	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for some dirty throttling pause
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	unsigned long dirty_paused_when; /* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	unsigned long timer_slack_ns;
	unsigned long default_timer_slack_ns;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	*ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask and counter of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
	struct memcg_batch_info {
		int do_batch;	/* incremented when batch uncharge started */
		struct mem_cgroup *memcg; /* target memcg of uncharge */
		unsigned long nr_pages;	/* uncharged usage */
		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
	} memcg_batch;
	unsigned int memcg_kmem_skip_account;
	struct memcg_oom_info {
		unsigned int may_oom:1;
		unsigned int in_memcg_oom:1;
		unsigned int oom_locked:1;
		int wakeups;
		struct mem_cgroup *wait_on_memcg;
	} memcg_oom;
#endif
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int	sequential_io;
	unsigned int	sequential_io_avg;
#endif
};

/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

#define TNF_MIGRATED	0x01
#define TNF_NO_GROUP	0x02

#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
#else
static inline void task_numa_fault(int last_node, int node, int pages,
				   int flags)
{
}
static inline pid_t task_numa_group_id(struct task_struct *p)
{
	return 0;
}
static inline void set_numabalancing_state(bool enabled)
{
}
#endif

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

struct pid_namespace;

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
				   struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}


static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
				    struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk,
				       struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
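/*
 * A minimal illustrative sketch, not part of the original header: the same
 * task reported from two vantage points.  The global id is what the init pid
 * namespace sees, the virtual id is what current's namespace sees; the
 * function name is hypothetical.
 */
static inline void example_report_pids(struct task_struct *tsk,
				       pid_t *gnr, pid_t *vnr)
{
	*gnr = task_pid_nr(tsk);	/* id in the init pid namespace */
	*vnr = task_pid_vnr(tsk);	/* id in current's pid namespace */
}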

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
			 cputime_t *utime, cputime_t *stime);
extern void task_cputime_scaled(struct task_struct *t,
				cputime_t *utimescaled, cputime_t *stimescaled);
extern cputime_t task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
				cputime_t *utime, cputime_t *stime)
{
	if (utime)
		*utime = t->utime;
	if (stime)
		*stime = t->stime;
}

static inline void task_cputime_scaled(struct task_struct *t,
				       cputime_t *utimescaled,
				       cputime_t *stimescaled)
{
	if (utimescaled)
		*utimescaled = t->utimescaled;
	if (stimescaled)
		*stimescaled = t->stimescaled;
}

static inline cputime_t task_gtime(struct task_struct *t)
{
	return t->gtime;
}
#endif
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
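/*
 * A minimal illustrative sketch, not part of the original header: callers go
 * through task_cputime() so they need not care whether full dynticks cputime
 * accounting (CONFIG_VIRT_CPU_ACCOUNTING_GEN) is compiled in.  The function
 * name is hypothetical.
 */
static inline cputime_t example_total_cputime(struct task_struct *t)
{
	cputime_t utime, stime;

	task_cputime(t, &utime, &stime);
	return utime + stime;
}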

/*
 * Per process flags
 */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_MCE_PROCESS	0x00000080	/* process policy on mce errors */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
#define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY	0x08000000	/* Early kill for mce process policy */
#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000	/* this thread called freeze_processes and should not be frozen */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~__GFP_IO;
	return flags;
}

static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
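/*
 * A minimal illustrative sketch, not part of the original header: the usual
 * save/modify/restore pattern around a region whose allocations must not
 * recurse into block I/O.  The function name is hypothetical.
 */
static inline void example_noio_region(void)
{
	unsigned int noio_flags = memalloc_noio_save();

	/* allocations in here have __GFP_IO stripped via memalloc_noio_flags() */

	memalloc_noio_restore(noio_flags);
}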
*/ 1765 1766static inline void rcu_copy_process(struct task_struct *p) 1767{ 1768 p->rcu_read_lock_nesting = 0; 1769 p->rcu_read_unlock_special = 0; 1770#ifdef CONFIG_TREE_PREEMPT_RCU 1771 p->rcu_blocked_node = NULL; 1772#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 1773#ifdef CONFIG_RCU_BOOST 1774 p->rcu_boost_mutex = NULL; 1775#endif /* #ifdef CONFIG_RCU_BOOST */ 1776 INIT_LIST_HEAD(&p->rcu_node_entry); 1777} 1778 1779#else 1780 1781static inline void rcu_copy_process(struct task_struct *p) 1782{ 1783} 1784 1785#endif 1786 1787static inline void tsk_restore_flags(struct task_struct *task, 1788 unsigned long orig_flags, unsigned long flags) 1789{ 1790 task->flags &= ~flags; 1791 task->flags |= orig_flags & flags; 1792} 1793 1794#ifdef CONFIG_SMP 1795extern void do_set_cpus_allowed(struct task_struct *p, 1796 const struct cpumask *new_mask); 1797 1798extern int set_cpus_allowed_ptr(struct task_struct *p, 1799 const struct cpumask *new_mask); 1800#else 1801static inline void do_set_cpus_allowed(struct task_struct *p, 1802 const struct cpumask *new_mask) 1803{ 1804} 1805static inline int set_cpus_allowed_ptr(struct task_struct *p, 1806 const struct cpumask *new_mask) 1807{ 1808 if (!cpumask_test_cpu(0, new_mask)) 1809 return -EINVAL; 1810 return 0; 1811} 1812#endif 1813 1814#ifdef CONFIG_NO_HZ_COMMON 1815void calc_load_enter_idle(void); 1816void calc_load_exit_idle(void); 1817#else 1818static inline void calc_load_enter_idle(void) { } 1819static inline void calc_load_exit_idle(void) { } 1820#endif /* CONFIG_NO_HZ_COMMON */ 1821 1822#ifndef CONFIG_CPUMASK_OFFSTACK 1823static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) 1824{ 1825 return set_cpus_allowed_ptr(p, &new_mask); 1826} 1827#endif 1828 1829/* 1830 * Do not use outside of architecture code which knows its limitations. 1831 * 1832 * sched_clock() has no promise of monotonicity or bounded drift between 1833 * CPUs, use (which you should not) requires disabling IRQs. 1834 * 1835 * Please use one of the three interfaces below. 1836 */ 1837extern unsigned long long notrace sched_clock(void); 1838/* 1839 * See the comment in kernel/sched/clock.c 1840 */ 1841extern u64 cpu_clock(int cpu); 1842extern u64 local_clock(void); 1843extern u64 sched_clock_cpu(int cpu); 1844 1845 1846extern void sched_clock_init(void); 1847 1848#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 1849static inline void sched_clock_tick(void) 1850{ 1851} 1852 1853static inline void sched_clock_idle_sleep_event(void) 1854{ 1855} 1856 1857static inline void sched_clock_idle_wakeup_event(u64 delta_ns) 1858{ 1859} 1860#else 1861/* 1862 * Architectures can set this to 1 if they have specified 1863 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, 1864 * but then during bootup it turns out that sched_clock() 1865 * is reliable after all: 1866 */ 1867extern int sched_clock_stable; 1868 1869extern void sched_clock_tick(void); 1870extern void sched_clock_idle_sleep_event(void); 1871extern void sched_clock_idle_wakeup_event(u64 delta_ns); 1872#endif 1873 1874#ifdef CONFIG_IRQ_TIME_ACCOUNTING 1875/* 1876 * An i/f to runtime opt-in for irq time accounting based off of sched_clock. 1877 * The reason for this explicit opt-in is not to have perf penalty with 1878 * slow sched_clocks. 
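 *
 * Illustrative sketch (not taken from any in-tree architecture; the
 * my_arch_* names are made up): an arch that knows its sched_clock()
 * is cheap can opt in once during boot, and can opt back out with
 * disable_sched_clock_irqtime() if the clock later proves unreliable:
 *
 *	void __init my_arch_time_init(void)
 *	{
 *		my_arch_register_sched_clock();
 *		enable_sched_clock_irqtime();
 *	}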
1879 */ 1880extern void enable_sched_clock_irqtime(void); 1881extern void disable_sched_clock_irqtime(void); 1882#else 1883static inline void enable_sched_clock_irqtime(void) {} 1884static inline void disable_sched_clock_irqtime(void) {} 1885#endif 1886 1887extern unsigned long long 1888task_sched_runtime(struct task_struct *task); 1889 1890/* sched_exec is called by processes performing an exec */ 1891#ifdef CONFIG_SMP 1892extern void sched_exec(void); 1893#else 1894#define sched_exec() {} 1895#endif 1896 1897extern void sched_clock_idle_sleep_event(void); 1898extern void sched_clock_idle_wakeup_event(u64 delta_ns); 1899 1900#ifdef CONFIG_HOTPLUG_CPU 1901extern void idle_task_exit(void); 1902#else 1903static inline void idle_task_exit(void) {} 1904#endif 1905 1906#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) 1907extern void wake_up_nohz_cpu(int cpu); 1908#else 1909static inline void wake_up_nohz_cpu(int cpu) { } 1910#endif 1911 1912#ifdef CONFIG_NO_HZ_FULL 1913extern bool sched_can_stop_tick(void); 1914extern u64 scheduler_tick_max_deferment(void); 1915#else 1916static inline bool sched_can_stop_tick(void) { return false; } 1917#endif 1918 1919#ifdef CONFIG_SCHED_AUTOGROUP 1920extern void sched_autogroup_create_attach(struct task_struct *p); 1921extern void sched_autogroup_detach(struct task_struct *p); 1922extern void sched_autogroup_fork(struct signal_struct *sig); 1923extern void sched_autogroup_exit(struct signal_struct *sig); 1924#ifdef CONFIG_PROC_FS 1925extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); 1926extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); 1927#endif 1928#else 1929static inline void sched_autogroup_create_attach(struct task_struct *p) { } 1930static inline void sched_autogroup_detach(struct task_struct *p) { } 1931static inline void sched_autogroup_fork(struct signal_struct *sig) { } 1932static inline void sched_autogroup_exit(struct signal_struct *sig) { } 1933#endif 1934 1935extern bool yield_to(struct task_struct *p, bool preempt); 1936extern void set_user_nice(struct task_struct *p, long nice); 1937extern int task_prio(const struct task_struct *p); 1938extern int task_nice(const struct task_struct *p); 1939extern int can_nice(const struct task_struct *p, const int nice); 1940extern int task_curr(const struct task_struct *p); 1941extern int idle_cpu(int cpu); 1942extern int sched_setscheduler(struct task_struct *, int, 1943 const struct sched_param *); 1944extern int sched_setscheduler_nocheck(struct task_struct *, int, 1945 const struct sched_param *); 1946extern struct task_struct *idle_task(int cpu); 1947/** 1948 * is_idle_task - is the specified task an idle task? 1949 * @p: the task in question. 1950 * 1951 * Return: 1 if @p is an idle task. 0 otherwise. 1952 */ 1953static inline bool is_idle_task(const struct task_struct *p) 1954{ 1955 return p->pid == 0; 1956} 1957extern struct task_struct *curr_task(int cpu); 1958extern void set_curr_task(int cpu, struct task_struct *p); 1959 1960void yield(void); 1961 1962/* 1963 * The default (Linux) execution domain. 
1964 */ 1965extern struct exec_domain default_exec_domain; 1966 1967union thread_union { 1968 struct thread_info thread_info; 1969 unsigned long stack[THREAD_SIZE/sizeof(long)]; 1970}; 1971 1972#ifndef __HAVE_ARCH_KSTACK_END 1973static inline int kstack_end(void *addr) 1974{ 1975 /* Reliable end of stack detection: 1976 * Some APM bios versions misalign the stack 1977 */ 1978 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); 1979} 1980#endif 1981 1982extern union thread_union init_thread_union; 1983extern struct task_struct init_task; 1984 1985extern struct mm_struct init_mm; 1986 1987extern struct pid_namespace init_pid_ns; 1988 1989/* 1990 * find a task by one of its numerical ids 1991 * 1992 * find_task_by_pid_ns(): 1993 * finds a task by its pid in the specified namespace 1994 * find_task_by_vpid(): 1995 * finds a task by its virtual pid 1996 * 1997 * see also find_vpid() etc in include/linux/pid.h 1998 */ 1999 2000extern struct task_struct *find_task_by_vpid(pid_t nr); 2001extern struct task_struct *find_task_by_pid_ns(pid_t nr, 2002 struct pid_namespace *ns); 2003 2004/* per-UID process charging. */ 2005extern struct user_struct * alloc_uid(kuid_t); 2006static inline struct user_struct *get_uid(struct user_struct *u) 2007{ 2008 atomic_inc(&u->__count); 2009 return u; 2010} 2011extern void free_uid(struct user_struct *); 2012 2013#include <asm/current.h> 2014 2015extern void xtime_update(unsigned long ticks); 2016 2017extern int wake_up_state(struct task_struct *tsk, unsigned int state); 2018extern int wake_up_process(struct task_struct *tsk); 2019extern void wake_up_new_task(struct task_struct *tsk); 2020#ifdef CONFIG_SMP 2021 extern void kick_process(struct task_struct *tsk); 2022#else 2023 static inline void kick_process(struct task_struct *tsk) { } 2024#endif 2025extern void sched_fork(unsigned long clone_flags, struct task_struct *p); 2026extern void sched_dead(struct task_struct *p); 2027 2028extern void proc_caches_init(void); 2029extern void flush_signals(struct task_struct *); 2030extern void __flush_signals(struct task_struct *); 2031extern void ignore_signals(struct task_struct *); 2032extern void flush_signal_handlers(struct task_struct *, int force_default); 2033extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); 2034 2035static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) 2036{ 2037 unsigned long flags; 2038 int ret; 2039 2040 spin_lock_irqsave(&tsk->sighand->siglock, flags); 2041 ret = dequeue_signal(tsk, mask, info); 2042 spin_unlock_irqrestore(&tsk->sighand->siglock, flags); 2043 2044 return ret; 2045} 2046 2047extern void block_all_signals(int (*notifier)(void *priv), void *priv, 2048 sigset_t *mask); 2049extern void unblock_all_signals(void); 2050extern void release_task(struct task_struct * p); 2051extern int send_sig_info(int, struct siginfo *, struct task_struct *); 2052extern int force_sigsegv(int, struct task_struct *); 2053extern int force_sig_info(int, struct siginfo *, struct task_struct *); 2054extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); 2055extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); 2056extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, 2057 const struct cred *, u32); 2058extern int kill_pgrp(struct pid *pid, int sig, int priv); 2059extern int kill_pid(struct pid *pid, int sig, int priv); 2060extern int kill_proc_info(int, struct siginfo *, pid_t); 2061extern 
__must_check bool do_notify_parent(struct task_struct *, int); 2062extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); 2063extern void force_sig(int, struct task_struct *); 2064extern int send_sig(int, struct task_struct *, int); 2065extern int zap_other_threads(struct task_struct *p); 2066extern struct sigqueue *sigqueue_alloc(void); 2067extern void sigqueue_free(struct sigqueue *); 2068extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); 2069extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); 2070 2071static inline void restore_saved_sigmask(void) 2072{ 2073 if (test_and_clear_restore_sigmask()) 2074 __set_current_blocked(&current->saved_sigmask); 2075} 2076 2077static inline sigset_t *sigmask_to_save(void) 2078{ 2079 sigset_t *res = &current->blocked; 2080 if (unlikely(test_restore_sigmask())) 2081 res = &current->saved_sigmask; 2082 return res; 2083} 2084 2085static inline int kill_cad_pid(int sig, int priv) 2086{ 2087 return kill_pid(cad_pid, sig, priv); 2088} 2089 2090/* These can be the second arg to send_sig_info/send_group_sig_info. */ 2091#define SEND_SIG_NOINFO ((struct siginfo *) 0) 2092#define SEND_SIG_PRIV ((struct siginfo *) 1) 2093#define SEND_SIG_FORCED ((struct siginfo *) 2) 2094 2095/* 2096 * True if we are on the alternate signal stack. 2097 */ 2098static inline int on_sig_stack(unsigned long sp) 2099{ 2100#ifdef CONFIG_STACK_GROWSUP 2101 return sp >= current->sas_ss_sp && 2102 sp - current->sas_ss_sp < current->sas_ss_size; 2103#else 2104 return sp > current->sas_ss_sp && 2105 sp - current->sas_ss_sp <= current->sas_ss_size; 2106#endif 2107} 2108 2109static inline int sas_ss_flags(unsigned long sp) 2110{ 2111 return (current->sas_ss_size == 0 ? SS_DISABLE 2112 : on_sig_stack(sp) ? SS_ONSTACK : 0); 2113} 2114 2115static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) 2116{ 2117 if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp)) 2118#ifdef CONFIG_STACK_GROWSUP 2119 return current->sas_ss_sp; 2120#else 2121 return current->sas_ss_sp + current->sas_ss_size; 2122#endif 2123 return sp; 2124} 2125 2126/* 2127 * Routines for handling mm_structs 2128 */ 2129extern struct mm_struct * mm_alloc(void); 2130 2131/* mmdrop drops the mm and the page tables */ 2132extern void __mmdrop(struct mm_struct *); 2133static inline void mmdrop(struct mm_struct * mm) 2134{ 2135 if (unlikely(atomic_dec_and_test(&mm->mm_count))) 2136 __mmdrop(mm); 2137} 2138 2139/* mmput gets rid of the mappings and all user-space */ 2140extern void mmput(struct mm_struct *); 2141/* Grab a reference to a task's mm, if it is not already going away */ 2142extern struct mm_struct *get_task_mm(struct task_struct *task); 2143/* 2144 * Grab a reference to a task's mm, if it is not already going away 2145 * and ptrace_may_access with the mode parameter passed to it 2146 * succeeds.
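 *
 * Illustrative sketch (modeled loosely on the procfs callers; the exact
 * error handling is an assumption, not a fixed rule): mm_access() may
 * return NULL when the task has no mm and an ERR_PTR() when the access
 * check fails, and the reference it returns is dropped with mmput():
 *
 *	mm = mm_access(task, PTRACE_MODE_READ);
 *	if (IS_ERR_OR_NULL(mm))
 *		return IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
 *	...use mm...
 *	mmput(mm);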
2147 */ 2148extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); 2149/* Remove the current task's stale references to the old mm_struct */ 2150extern void mm_release(struct task_struct *, struct mm_struct *); 2151/* Allocate a new mm structure and copy contents from tsk->mm */ 2152extern struct mm_struct *dup_mm(struct task_struct *tsk); 2153 2154extern int copy_thread(unsigned long, unsigned long, unsigned long, 2155 struct task_struct *); 2156extern void flush_thread(void); 2157extern void exit_thread(void); 2158 2159extern void exit_files(struct task_struct *); 2160extern void __cleanup_sighand(struct sighand_struct *); 2161 2162extern void exit_itimers(struct signal_struct *); 2163extern void flush_itimer_signals(void); 2164 2165extern void do_group_exit(int); 2166 2167extern int allow_signal(int); 2168extern int disallow_signal(int); 2169 2170extern int do_execve(const char *, 2171 const char __user * const __user *, 2172 const char __user * const __user *); 2173extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); 2174struct task_struct *fork_idle(int); 2175extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 2176 2177extern void set_task_comm(struct task_struct *tsk, char *from); 2178extern char *get_task_comm(char *to, struct task_struct *tsk); 2179 2180#ifdef CONFIG_SMP 2181void scheduler_ipi(void); 2182extern unsigned long wait_task_inactive(struct task_struct *, long match_state); 2183#else 2184static inline void scheduler_ipi(void) { } 2185static inline unsigned long wait_task_inactive(struct task_struct *p, 2186 long match_state) 2187{ 2188 return 1; 2189} 2190#endif 2191 2192#define next_task(p) \ 2193 list_entry_rcu((p)->tasks.next, struct task_struct, tasks) 2194 2195#define for_each_process(p) \ 2196 for (p = &init_task ; (p = next_task(p)) != &init_task ; ) 2197 2198extern bool current_is_single_threaded(void); 2199 2200/* 2201 * Careful: do_each_thread/while_each_thread is a double loop so 2202 * 'break' will not work as expected - use goto instead. 2203 */ 2204#define do_each_thread(g, t) \ 2205 for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do 2206 2207#define while_each_thread(g, t) \ 2208 while ((t = next_thread(t)) != g) 2209 2210static inline int get_nr_threads(struct task_struct *tsk) 2211{ 2212 return tsk->signal->nr_threads; 2213} 2214 2215static inline bool thread_group_leader(struct task_struct *p) 2216{ 2217 return p->exit_signal >= 0; 2218} 2219 2220/* Due to the insanities of de_thread it is possible for a process 2221 * to have the pid of the thread group leader without actually being 2222 * the thread group leader. For iteration through the pids in proc 2223 * all we care about is that we have a task with the appropriate 2224 * pid, we don't actually care if we have the right task.
2225 */ 2226static inline bool has_group_leader_pid(struct task_struct *p) 2227{ 2228 return task_pid(p) == p->signal->leader_pid; 2229} 2230 2231static inline 2232bool same_thread_group(struct task_struct *p1, struct task_struct *p2) 2233{ 2234 return p1->signal == p2->signal; 2235} 2236 2237static inline struct task_struct *next_thread(const struct task_struct *p) 2238{ 2239 return list_entry_rcu(p->thread_group.next, 2240 struct task_struct, thread_group); 2241} 2242 2243static inline int thread_group_empty(struct task_struct *p) 2244{ 2245 return list_empty(&p->thread_group); 2246} 2247 2248#define delay_group_leader(p) \ 2249 (thread_group_leader(p) && !thread_group_empty(p)) 2250 2251/* 2252 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring 2253 * subscriptions and synchronises with wait4(). Also used in procfs. Also 2254 * pins the final release of task.io_context. Also protects ->cpuset and 2255 * ->cgroup.subsys[]. And ->vfork_done. 2256 * 2257 * Nests both inside and outside of read_lock(&tasklist_lock). 2258 * It must not be nested with write_lock_irq(&tasklist_lock), 2259 * neither inside nor outside. 2260 */ 2261static inline void task_lock(struct task_struct *p) 2262{ 2263 spin_lock(&p->alloc_lock); 2264} 2265 2266static inline void task_unlock(struct task_struct *p) 2267{ 2268 spin_unlock(&p->alloc_lock); 2269} 2270 2271extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, 2272 unsigned long *flags); 2273 2274static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk, 2275 unsigned long *flags) 2276{ 2277 struct sighand_struct *ret; 2278 2279 ret = __lock_task_sighand(tsk, flags); 2280 (void)__cond_lock(&tsk->sighand->siglock, ret); 2281 return ret; 2282} 2283 2284static inline void unlock_task_sighand(struct task_struct *tsk, 2285 unsigned long *flags) 2286{ 2287 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); 2288} 2289 2290#ifdef CONFIG_CGROUPS 2291static inline void threadgroup_change_begin(struct task_struct *tsk) 2292{ 2293 down_read(&tsk->signal->group_rwsem); 2294} 2295static inline void threadgroup_change_end(struct task_struct *tsk) 2296{ 2297 up_read(&tsk->signal->group_rwsem); 2298} 2299 2300/** 2301 * threadgroup_lock - lock threadgroup 2302 * @tsk: member task of the threadgroup to lock 2303 * 2304 * Lock the threadgroup @tsk belongs to. No new task is allowed to enter 2305 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or 2306 * change ->group_leader/pid. This is useful for cases where the threadgroup 2307 * needs to stay stable across blockable operations. 2308 * 2309 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for 2310 * synchronization. While held, no new task will be added to threadgroup 2311 * and no existing live task will have its PF_EXITING set. 2312 * 2313 * de_thread() does threadgroup_change_{begin|end}() when a non-leader 2314 * sub-thread becomes a new leader. 2315 */ 2316static inline void threadgroup_lock(struct task_struct *tsk) 2317{ 2318 down_write(&tsk->signal->group_rwsem); 2319} 2320 2321/** 2322 * threadgroup_unlock - unlock threadgroup 2323 * @tsk: member task of the threadgroup to unlock 2324 * 2325 * Reverse threadgroup_lock(). 
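 *
 * Illustrative sketch (a usage outline under the rules stated above,
 * not copied from an in-tree caller): the pair brackets work that needs
 * the thread group to stay stable:
 *
 *	threadgroup_lock(tsk);
 *	...walk tsk's thread group; no task can enter it or set PF_EXITING...
 *	threadgroup_unlock(tsk);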
2326 */ 2327static inline void threadgroup_unlock(struct task_struct *tsk) 2328{ 2329 up_write(&tsk->signal->group_rwsem); 2330} 2331#else 2332static inline void threadgroup_change_begin(struct task_struct *tsk) {} 2333static inline void threadgroup_change_end(struct task_struct *tsk) {} 2334static inline void threadgroup_lock(struct task_struct *tsk) {} 2335static inline void threadgroup_unlock(struct task_struct *tsk) {} 2336#endif 2337 2338#ifndef __HAVE_THREAD_FUNCTIONS 2339 2340#define task_thread_info(task) ((struct thread_info *)(task)->stack) 2341#define task_stack_page(task) ((task)->stack) 2342 2343static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) 2344{ 2345 *task_thread_info(p) = *task_thread_info(org); 2346 task_thread_info(p)->task = p; 2347} 2348 2349static inline unsigned long *end_of_stack(struct task_struct *p) 2350{ 2351 return (unsigned long *)(task_thread_info(p) + 1); 2352} 2353 2354#endif 2355 2356static inline int object_is_on_stack(void *obj) 2357{ 2358 void *stack = task_stack_page(current); 2359 2360 return (obj >= stack) && (obj < (stack + THREAD_SIZE)); 2361} 2362 2363extern void thread_info_cache_init(void); 2364 2365#ifdef CONFIG_DEBUG_STACK_USAGE 2366static inline unsigned long stack_not_used(struct task_struct *p) 2367{ 2368 unsigned long *n = end_of_stack(p); 2369 2370 do { /* Skip over canary */ 2371 n++; 2372 } while (!*n); 2373 2374 return (unsigned long)n - (unsigned long)end_of_stack(p); 2375} 2376#endif 2377 2378/* set thread flags in other task's structures 2379 * - see asm/thread_info.h for TIF_xxxx flags available 2380 */ 2381static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) 2382{ 2383 set_ti_thread_flag(task_thread_info(tsk), flag); 2384} 2385 2386static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) 2387{ 2388 clear_ti_thread_flag(task_thread_info(tsk), flag); 2389} 2390 2391static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) 2392{ 2393 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); 2394} 2395 2396static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) 2397{ 2398 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); 2399} 2400 2401static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) 2402{ 2403 return test_ti_thread_flag(task_thread_info(tsk), flag); 2404} 2405 2406static inline void set_tsk_need_resched(struct task_struct *tsk) 2407{ 2408 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 2409} 2410 2411static inline void clear_tsk_need_resched(struct task_struct *tsk) 2412{ 2413 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 2414} 2415 2416static inline int test_tsk_need_resched(struct task_struct *tsk) 2417{ 2418 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); 2419} 2420 2421static inline int restart_syscall(void) 2422{ 2423 set_tsk_thread_flag(current, TIF_SIGPENDING); 2424 return -ERESTARTNOINTR; 2425} 2426 2427static inline int signal_pending(struct task_struct *p) 2428{ 2429 return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); 2430} 2431 2432static inline int __fatal_signal_pending(struct task_struct *p) 2433{ 2434 return unlikely(sigismember(&p->pending.signal, SIGKILL)); 2435} 2436 2437static inline int fatal_signal_pending(struct task_struct *p) 2438{ 2439 return signal_pending(p) && __fatal_signal_pending(p); 2440} 2441 2442static inline int signal_pending_state(long state, struct task_struct *p) 2443{ 2444 if (!(state & (TASK_INTERRUPTIBLE 
| TASK_WAKEKILL))) 2445 return 0; 2446 if (!signal_pending(p)) 2447 return 0; 2448 2449 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); 2450} 2451 2452/* 2453 * cond_resched() and cond_resched_lock(): latency reduction via 2454 * explicit rescheduling in places that are safe. The return 2455 * value indicates whether a reschedule was done in fact. 2456 * cond_resched_lock() will drop the spinlock before scheduling, 2457 * cond_resched_softirq() will enable bhs before scheduling. 2458 */ 2459extern int _cond_resched(void); 2460 2461#define cond_resched() ({ \ 2462 __might_sleep(__FILE__, __LINE__, 0); \ 2463 _cond_resched(); \ 2464}) 2465 2466extern int __cond_resched_lock(spinlock_t *lock); 2467 2468#ifdef CONFIG_PREEMPT_COUNT 2469#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET 2470#else 2471#define PREEMPT_LOCK_OFFSET 0 2472#endif 2473 2474#define cond_resched_lock(lock) ({ \ 2475 __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ 2476 __cond_resched_lock(lock); \ 2477}) 2478 2479extern int __cond_resched_softirq(void); 2480 2481#define cond_resched_softirq() ({ \ 2482 __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ 2483 __cond_resched_softirq(); \ 2484}) 2485 2486static inline void cond_resched_rcu(void) 2487{ 2488#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) 2489 rcu_read_unlock(); 2490 cond_resched(); 2491 rcu_read_lock(); 2492#endif 2493} 2494 2495/* 2496 * Does a critical section need to be broken due to another 2497 * task waiting?: (technically does not depend on CONFIG_PREEMPT, 2498 * but a general need for low latency) 2499 */ 2500static inline int spin_needbreak(spinlock_t *lock) 2501{ 2502#ifdef CONFIG_PREEMPT 2503 return spin_is_contended(lock); 2504#else 2505 return 0; 2506#endif 2507} 2508 2509/* 2510 * Idle thread specific functions to determine the need_resched 2511 * polling state. 
We have two versions, one based on TS_POLLING in 2512 * thread_info.status and one based on TIF_POLLING_NRFLAG in 2513 * thread_info.flags 2514 */ 2515#ifdef TS_POLLING 2516static inline int tsk_is_polling(struct task_struct *p) 2517{ 2518 return task_thread_info(p)->status & TS_POLLING; 2519} 2520static inline void __current_set_polling(void) 2521{ 2522 current_thread_info()->status |= TS_POLLING; 2523} 2524 2525static inline bool __must_check current_set_polling_and_test(void) 2526{ 2527 __current_set_polling(); 2528 2529 /* 2530 * Polling state must be visible before we test NEED_RESCHED, 2531 * paired by resched_task() 2532 */ 2533 smp_mb(); 2534 2535 return unlikely(tif_need_resched()); 2536} 2537 2538static inline void __current_clr_polling(void) 2539{ 2540 current_thread_info()->status &= ~TS_POLLING; 2541} 2542 2543static inline bool __must_check current_clr_polling_and_test(void) 2544{ 2545 __current_clr_polling(); 2546 2547 /* 2548 * Polling state must be visible before we test NEED_RESCHED, 2549 * paired by resched_task() 2550 */ 2551 smp_mb(); 2552 2553 return unlikely(tif_need_resched()); 2554} 2555#elif defined(TIF_POLLING_NRFLAG) 2556static inline int tsk_is_polling(struct task_struct *p) 2557{ 2558 return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); 2559} 2560 2561static inline void __current_set_polling(void) 2562{ 2563 set_thread_flag(TIF_POLLING_NRFLAG); 2564} 2565 2566static inline bool __must_check current_set_polling_and_test(void) 2567{ 2568 __current_set_polling(); 2569 2570 /* 2571 * Polling state must be visible before we test NEED_RESCHED, 2572 * paired by resched_task() 2573 * 2574 * XXX: assumes set/clear bit are identical barrier wise. 2575 */ 2576 smp_mb__after_clear_bit(); 2577 2578 return unlikely(tif_need_resched()); 2579} 2580 2581static inline void __current_clr_polling(void) 2582{ 2583 clear_thread_flag(TIF_POLLING_NRFLAG); 2584} 2585 2586static inline bool __must_check current_clr_polling_and_test(void) 2587{ 2588 __current_clr_polling(); 2589 2590 /* 2591 * Polling state must be visible before we test NEED_RESCHED, 2592 * paired by resched_task() 2593 */ 2594 smp_mb__after_clear_bit(); 2595 2596 return unlikely(tif_need_resched()); 2597} 2598 2599#else 2600static inline int tsk_is_polling(struct task_struct *p) { return 0; } 2601static inline void __current_set_polling(void) { } 2602static inline void __current_clr_polling(void) { } 2603 2604static inline bool __must_check current_set_polling_and_test(void) 2605{ 2606 return unlikely(tif_need_resched()); 2607} 2608static inline bool __must_check current_clr_polling_and_test(void) 2609{ 2610 return unlikely(tif_need_resched()); 2611} 2612#endif 2613 2614static __always_inline bool need_resched(void) 2615{ 2616 return unlikely(tif_need_resched()); 2617} 2618 2619/* 2620 * Thread group CPU time accounting. 2621 */ 2622void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); 2623void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); 2624 2625static inline void thread_group_cputime_init(struct signal_struct *sig) 2626{ 2627 raw_spin_lock_init(&sig->cputimer.lock); 2628} 2629 2630/* 2631 * Reevaluate whether the task has signals pending delivery. 2632 * Wake the task if so. 2633 * This is required every time the blocked sigset_t changes. 2634 * callers must hold sighand->siglock. 
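 *
 * Illustrative sketch (the locking is the documented requirement above;
 * the surrounding caller is assumed, not copied from kernel/signal.c):
 *
 *	spin_lock_irq(&t->sighand->siglock);
 *	t->blocked = *newset;
 *	recalc_sigpending_and_wake(t);
 *	spin_unlock_irq(&t->sighand->siglock);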
2635 */ 2636extern void recalc_sigpending_and_wake(struct task_struct *t); 2637extern void recalc_sigpending(void); 2638 2639extern void signal_wake_up_state(struct task_struct *t, unsigned int state); 2640 2641static inline void signal_wake_up(struct task_struct *t, bool resume) 2642{ 2643 signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); 2644} 2645static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) 2646{ 2647 signal_wake_up_state(t, resume ? __TASK_TRACED : 0); 2648} 2649 2650/* 2651 * Wrappers for p->thread_info->cpu access. No-op on UP. 2652 */ 2653#ifdef CONFIG_SMP 2654 2655static inline unsigned int task_cpu(const struct task_struct *p) 2656{ 2657 return task_thread_info(p)->cpu; 2658} 2659 2660extern void set_task_cpu(struct task_struct *p, unsigned int cpu); 2661 2662#else 2663 2664static inline unsigned int task_cpu(const struct task_struct *p) 2665{ 2666 return 0; 2667} 2668 2669static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) 2670{ 2671} 2672 2673#endif /* CONFIG_SMP */ 2674 2675extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); 2676extern long sched_getaffinity(pid_t pid, struct cpumask *mask); 2677 2678#ifdef CONFIG_CGROUP_SCHED 2679extern struct task_group root_task_group; 2680#endif /* CONFIG_CGROUP_SCHED */ 2681 2682extern int task_can_switch_user(struct user_struct *up, 2683 struct task_struct *tsk); 2684 2685#ifdef CONFIG_TASK_XACCT 2686static inline void add_rchar(struct task_struct *tsk, ssize_t amt) 2687{ 2688 tsk->ioac.rchar += amt; 2689} 2690 2691static inline void add_wchar(struct task_struct *tsk, ssize_t amt) 2692{ 2693 tsk->ioac.wchar += amt; 2694} 2695 2696static inline void inc_syscr(struct task_struct *tsk) 2697{ 2698 tsk->ioac.syscr++; 2699} 2700 2701static inline void inc_syscw(struct task_struct *tsk) 2702{ 2703 tsk->ioac.syscw++; 2704} 2705#else 2706static inline void add_rchar(struct task_struct *tsk, ssize_t amt) 2707{ 2708} 2709 2710static inline void add_wchar(struct task_struct *tsk, ssize_t amt) 2711{ 2712} 2713 2714static inline void inc_syscr(struct task_struct *tsk) 2715{ 2716} 2717 2718static inline void inc_syscw(struct task_struct *tsk) 2719{ 2720} 2721#endif 2722 2723#ifndef TASK_SIZE_OF 2724#define TASK_SIZE_OF(tsk) TASK_SIZE 2725#endif 2726 2727#ifdef CONFIG_MM_OWNER 2728extern void mm_update_next_owner(struct mm_struct *mm); 2729extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); 2730#else 2731static inline void mm_update_next_owner(struct mm_struct *mm) 2732{ 2733} 2734 2735static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p) 2736{ 2737} 2738#endif /* CONFIG_MM_OWNER */ 2739 2740static inline unsigned long task_rlimit(const struct task_struct *tsk, 2741 unsigned int limit) 2742{ 2743 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur); 2744} 2745 2746static inline unsigned long task_rlimit_max(const struct task_struct *tsk, 2747 unsigned int limit) 2748{ 2749 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max); 2750} 2751 2752static inline unsigned long rlimit(unsigned int limit) 2753{ 2754 return task_rlimit(current, limit); 2755} 2756 2757static inline unsigned long rlimit_max(unsigned int limit) 2758{ 2759 return task_rlimit_max(current, limit); 2760} 2761 2762#endif 2763
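
/*
 * Illustrative sketch (a usage example; process_one() and long_list are
 * made-up names): cond_resched() is meant to be called from long-running
 * loops in process context so that other runnable tasks get CPU time:
 *
 *	list_for_each_entry(obj, &long_list, node) {
 *		process_one(obj);
 *		cond_resched();
 *	}
 */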