sched.h revision 77e4ef99d1c596a31747668e5fd837f77b6349b6
1#ifndef _LINUX_SCHED_H 2#define _LINUX_SCHED_H 3 4/* 5 * cloning flags: 6 */ 7#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */ 8#define CLONE_VM 0x00000100 /* set if VM shared between processes */ 9#define CLONE_FS 0x00000200 /* set if fs info shared between processes */ 10#define CLONE_FILES 0x00000400 /* set if open files shared between processes */ 11#define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked signals shared */ 12#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */ 13#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */ 14#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */ 15#define CLONE_THREAD 0x00010000 /* Same thread group? */ 16#define CLONE_NEWNS 0x00020000 /* New namespace group? */ 17#define CLONE_SYSVSEM 0x00040000 /* share system V SEM_UNDO semantics */ 18#define CLONE_SETTLS 0x00080000 /* create a new TLS for the child */ 19#define CLONE_PARENT_SETTID 0x00100000 /* set the TID in the parent */ 20#define CLONE_CHILD_CLEARTID 0x00200000 /* clear the TID in the child */ 21#define CLONE_DETACHED 0x00400000 /* Unused, ignored */ 22#define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */ 23#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */ 24/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state) 25 and is now available for re-use. */ 26#define CLONE_NEWUTS 0x04000000 /* New utsname group? */ 27#define CLONE_NEWIPC 0x08000000 /* New ipcs */ 28#define CLONE_NEWUSER 0x10000000 /* New user namespace */ 29#define CLONE_NEWPID 0x20000000 /* New pid namespace */ 30#define CLONE_NEWNET 0x40000000 /* New network namespace */ 31#define CLONE_IO 0x80000000 /* Clone io context */ 32 33/* 34 * Scheduling policies 35 */ 36#define SCHED_NORMAL 0 37#define SCHED_FIFO 1 38#define SCHED_RR 2 39#define SCHED_BATCH 3 40/* SCHED_ISO: reserved but not implemented yet */ 41#define SCHED_IDLE 5 42/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */ 43#define SCHED_RESET_ON_FORK 0x40000000 44 45#ifdef __KERNEL__ 46 47struct sched_param { 48 int sched_priority; 49}; 50 51#include <asm/param.h> /* for HZ */ 52 53#include <linux/capability.h> 54#include <linux/threads.h> 55#include <linux/kernel.h> 56#include <linux/types.h> 57#include <linux/timex.h> 58#include <linux/jiffies.h> 59#include <linux/rbtree.h> 60#include <linux/thread_info.h> 61#include <linux/cpumask.h> 62#include <linux/errno.h> 63#include <linux/nodemask.h> 64#include <linux/mm_types.h> 65 66#include <asm/system.h> 67#include <asm/page.h> 68#include <asm/ptrace.h> 69#include <asm/cputime.h> 70 71#include <linux/smp.h> 72#include <linux/sem.h> 73#include <linux/signal.h> 74#include <linux/compiler.h> 75#include <linux/completion.h> 76#include <linux/pid.h> 77#include <linux/percpu.h> 78#include <linux/topology.h> 79#include <linux/proportions.h> 80#include <linux/seccomp.h> 81#include <linux/rcupdate.h> 82#include <linux/rculist.h> 83#include <linux/rtmutex.h> 84 85#include <linux/time.h> 86#include <linux/param.h> 87#include <linux/resource.h> 88#include <linux/timer.h> 89#include <linux/hrtimer.h> 90#include <linux/task_io_accounting.h> 91#include <linux/latencytop.h> 92#include <linux/cred.h> 93#include <linux/llist.h> 94 95#include <asm/processor.h> 96 97struct exec_domain; 98struct futex_pi_state; 99struct robust_list_head; 100struct bio_list; 
101struct fs_struct; 102struct perf_event_context; 103struct blk_plug; 104 105/* 106 * List of flags we want to share for kernel threads, 107 * if only because they are not used by them anyway. 108 */ 109#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND) 110 111/* 112 * These are the constant used to fake the fixed-point load-average 113 * counting. Some notes: 114 * - 11 bit fractions expand to 22 bits by the multiplies: this gives 115 * a load-average precision of 10 bits integer + 11 bits fractional 116 * - if you want to count load-averages more often, you need more 117 * precision, or rounding will get you. With 2-second counting freq, 118 * the EXP_n values would be 1981, 2034 and 2043 if still using only 119 * 11 bit fractions. 120 */ 121extern unsigned long avenrun[]; /* Load averages */ 122extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); 123 124#define FSHIFT 11 /* nr of bits of precision */ 125#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ 126#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */ 127#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */ 128#define EXP_5 2014 /* 1/exp(5sec/5min) */ 129#define EXP_15 2037 /* 1/exp(5sec/15min) */ 130 131#define CALC_LOAD(load,exp,n) \ 132 load *= exp; \ 133 load += n*(FIXED_1-exp); \ 134 load >>= FSHIFT; 135 136extern unsigned long total_forks; 137extern int nr_threads; 138DECLARE_PER_CPU(unsigned long, process_counts); 139extern int nr_processes(void); 140extern unsigned long nr_running(void); 141extern unsigned long nr_uninterruptible(void); 142extern unsigned long nr_iowait(void); 143extern unsigned long nr_iowait_cpu(int cpu); 144extern unsigned long this_cpu_load(void); 145 146 147extern void calc_global_load(unsigned long ticks); 148 149extern unsigned long get_parent_ip(unsigned long addr); 150 151struct seq_file; 152struct cfs_rq; 153struct task_group; 154#ifdef CONFIG_SCHED_DEBUG 155extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); 156extern void proc_sched_set_task(struct task_struct *p); 157extern void 158print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); 159#else 160static inline void 161proc_sched_show_task(struct task_struct *p, struct seq_file *m) 162{ 163} 164static inline void proc_sched_set_task(struct task_struct *p) 165{ 166} 167static inline void 168print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) 169{ 170} 171#endif 172 173/* 174 * Task state bitmask. NOTE! These bits are also 175 * encoded in fs/proc/array.c: get_task_state(). 176 * 177 * We have two separate sets of flags: task->state 178 * is about runnability, while task->exit_state are 179 * about the task exiting. Confusing, but this way 180 * modifying one set can't modify the other one by 181 * mistake. 
182 */ 183#define TASK_RUNNING 0 184#define TASK_INTERRUPTIBLE 1 185#define TASK_UNINTERRUPTIBLE 2 186#define __TASK_STOPPED 4 187#define __TASK_TRACED 8 188/* in tsk->exit_state */ 189#define EXIT_ZOMBIE 16 190#define EXIT_DEAD 32 191/* in tsk->state again */ 192#define TASK_DEAD 64 193#define TASK_WAKEKILL 128 194#define TASK_WAKING 256 195#define TASK_STATE_MAX 512 196 197#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW" 198 199extern char ___assert_task_state[1 - 2*!!( 200 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; 201 202/* Convenience macros for the sake of set_task_state */ 203#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) 204#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) 205#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) 206 207/* Convenience macros for the sake of wake_up */ 208#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) 209#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) 210 211/* get_task_state() */ 212#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ 213 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ 214 __TASK_TRACED) 215 216#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) 217#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) 218#define task_is_dead(task) ((task)->exit_state != 0) 219#define task_is_stopped_or_traced(task) \ 220 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) 221#define task_contributes_to_load(task) \ 222 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ 223 (task->flags & PF_FROZEN) == 0) 224 225#define __set_task_state(tsk, state_value) \ 226 do { (tsk)->state = (state_value); } while (0) 227#define set_task_state(tsk, state_value) \ 228 set_mb((tsk)->state, (state_value)) 229 230/* 231 * set_current_state() includes a barrier so that the write of current->state 232 * is correctly serialised wrt the caller's subsequent test of whether to 233 * actually sleep: 234 * 235 * set_current_state(TASK_UNINTERRUPTIBLE); 236 * if (do_i_need_to_sleep()) 237 * schedule(); 238 * 239 * If the caller does not need such serialisation then use __set_current_state() 240 */ 241#define __set_current_state(state_value) \ 242 do { current->state = (state_value); } while (0) 243#define set_current_state(state_value) \ 244 set_mb(current->state, (state_value)) 245 246/* Task command name length */ 247#define TASK_COMM_LEN 16 248 249#include <linux/spinlock.h> 250 251/* 252 * This serializes "schedule()" and also protects 253 * the run-queue from deletions/modifications (but 254 * _adding_ to the beginning of the run-queue has 255 * a separate lock). 256 */ 257extern rwlock_t tasklist_lock; 258extern spinlock_t mmlist_lock; 259 260struct task_struct; 261 262#ifdef CONFIG_PROVE_RCU 263extern int lockdep_tasklist_lock_is_held(void); 264#endif /* #ifdef CONFIG_PROVE_RCU */ 265 266extern void sched_init(void); 267extern void sched_init_smp(void); 268extern asmlinkage void schedule_tail(struct task_struct *prev); 269extern void init_idle(struct task_struct *idle, int cpu); 270extern void init_idle_bootup_task(struct task_struct *idle); 271 272extern int runqueue_is_locked(int cpu); 273 274#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) 275extern void select_nohz_load_balancer(int stop_tick); 276extern int get_nohz_timer_target(void); 277#else 278static inline void select_nohz_load_balancer(int stop_tick) { } 279#endif 280 281/* 282 * Only dump TASK_* tasks. 
(0 for all tasks) 283 */ 284extern void show_state_filter(unsigned long state_filter); 285 286static inline void show_state(void) 287{ 288 show_state_filter(0); 289} 290 291extern void show_regs(struct pt_regs *); 292 293/* 294 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current 295 * task), SP is the stack pointer of the first frame that should be shown in the back 296 * trace (or NULL if the entire call-chain of the task should be shown). 297 */ 298extern void show_stack(struct task_struct *task, unsigned long *sp); 299 300void io_schedule(void); 301long io_schedule_timeout(long timeout); 302 303extern void cpu_init (void); 304extern void trap_init(void); 305extern void update_process_times(int user); 306extern void scheduler_tick(void); 307 308extern void sched_show_task(struct task_struct *p); 309 310#ifdef CONFIG_LOCKUP_DETECTOR 311extern void touch_softlockup_watchdog(void); 312extern void touch_softlockup_watchdog_sync(void); 313extern void touch_all_softlockup_watchdogs(void); 314extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, 315 void __user *buffer, 316 size_t *lenp, loff_t *ppos); 317extern unsigned int softlockup_panic; 318void lockup_detector_init(void); 319#else 320static inline void touch_softlockup_watchdog(void) 321{ 322} 323static inline void touch_softlockup_watchdog_sync(void) 324{ 325} 326static inline void touch_all_softlockup_watchdogs(void) 327{ 328} 329static inline void lockup_detector_init(void) 330{ 331} 332#endif 333 334#ifdef CONFIG_DETECT_HUNG_TASK 335extern unsigned int sysctl_hung_task_panic; 336extern unsigned long sysctl_hung_task_check_count; 337extern unsigned long sysctl_hung_task_timeout_secs; 338extern unsigned long sysctl_hung_task_warnings; 339extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, 340 void __user *buffer, 341 size_t *lenp, loff_t *ppos); 342#else 343/* Avoid need for ifdefs elsewhere in the code */ 344enum { sysctl_hung_task_timeout_secs = 0 }; 345#endif 346 347/* Attach to any functions which should be ignored in wchan output. */ 348#define __sched __attribute__((__section__(".sched.text"))) 349 350/* Linker adds these: start and end of __sched functions */ 351extern char __sched_text_start[], __sched_text_end[]; 352 353/* Is this address in the __sched functions? */ 354extern int in_sched_functions(unsigned long addr); 355 356#define MAX_SCHEDULE_TIMEOUT LONG_MAX 357extern signed long schedule_timeout(signed long timeout); 358extern signed long schedule_timeout_interruptible(signed long timeout); 359extern signed long schedule_timeout_killable(signed long timeout); 360extern signed long schedule_timeout_uninterruptible(signed long timeout); 361asmlinkage void schedule(void); 362extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner); 363 364struct nsproxy; 365struct user_namespace; 366 367/* 368 * Default maximum number of active map areas, this limits the number of vmas 369 * per mm struct. Users can overwrite this number by sysctl but there is a 370 * problem. 371 * 372 * When a program's coredump is generated as ELF format, a section is created 373 * per a vma. In ELF, the number of sections is represented in unsigned short. 374 * This means the number of sections should be smaller than 65535 at coredump. 375 * Because the kernel adds some informative sections to a image of program at 376 * generating coredump, we need some margin. The number of extra sections is 377 * 1-3 now and depends on arch. We use "5" as safe margin, here. 
378 */ 379#define MAPCOUNT_ELF_CORE_MARGIN (5) 380#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) 381 382extern int sysctl_max_map_count; 383 384#include <linux/aio.h> 385 386#ifdef CONFIG_MMU 387extern void arch_pick_mmap_layout(struct mm_struct *mm); 388extern unsigned long 389arch_get_unmapped_area(struct file *, unsigned long, unsigned long, 390 unsigned long, unsigned long); 391extern unsigned long 392arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, 393 unsigned long len, unsigned long pgoff, 394 unsigned long flags); 395extern void arch_unmap_area(struct mm_struct *, unsigned long); 396extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); 397#else 398static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} 399#endif 400 401 402extern void set_dumpable(struct mm_struct *mm, int value); 403extern int get_dumpable(struct mm_struct *mm); 404 405/* mm flags */ 406/* dumpable bits */ 407#define MMF_DUMPABLE 0 /* core dump is permitted */ 408#define MMF_DUMP_SECURELY 1 /* core file is readable only by root */ 409 410#define MMF_DUMPABLE_BITS 2 411#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) 412 413/* coredump filter bits */ 414#define MMF_DUMP_ANON_PRIVATE 2 415#define MMF_DUMP_ANON_SHARED 3 416#define MMF_DUMP_MAPPED_PRIVATE 4 417#define MMF_DUMP_MAPPED_SHARED 5 418#define MMF_DUMP_ELF_HEADERS 6 419#define MMF_DUMP_HUGETLB_PRIVATE 7 420#define MMF_DUMP_HUGETLB_SHARED 8 421 422#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS 423#define MMF_DUMP_FILTER_BITS 7 424#define MMF_DUMP_FILTER_MASK \ 425 (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) 426#define MMF_DUMP_FILTER_DEFAULT \ 427 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ 428 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF) 429 430#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS 431# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) 432#else 433# define MMF_DUMP_MASK_DEFAULT_ELF 0 434#endif 435 /* leave room for more dump flags */ 436#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ 437#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ 438 439#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) 440 441struct sighand_struct { 442 atomic_t count; 443 struct k_sigaction action[_NSIG]; 444 spinlock_t siglock; 445 wait_queue_head_t signalfd_wqh; 446}; 447 448struct pacct_struct { 449 int ac_flag; 450 long ac_exitcode; 451 unsigned long ac_mem; 452 cputime_t ac_utime, ac_stime; 453 unsigned long ac_minflt, ac_majflt; 454}; 455 456struct cpu_itimer { 457 cputime_t expires; 458 cputime_t incr; 459 u32 error; 460 u32 incr_error; 461}; 462 463/** 464 * struct task_cputime - collected CPU time counts 465 * @utime: time spent in user mode, in &cputime_t units 466 * @stime: time spent in kernel mode, in &cputime_t units 467 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds 468 * 469 * This structure groups together three kinds of CPU time that are 470 * tracked for threads and thread groups. Most things considering 471 * CPU time want to group these counts together and treat all three 472 * of them in parallel. 473 */ 474struct task_cputime { 475 cputime_t utime; 476 cputime_t stime; 477 unsigned long long sum_exec_runtime; 478}; 479/* Alternate field names when used to cache expirations. 
*/ 480#define prof_exp stime 481#define virt_exp utime 482#define sched_exp sum_exec_runtime 483 484#define INIT_CPUTIME \ 485 (struct task_cputime) { \ 486 .utime = cputime_zero, \ 487 .stime = cputime_zero, \ 488 .sum_exec_runtime = 0, \ 489 } 490 491/* 492 * Disable preemption until the scheduler is running. 493 * Reset by start_kernel()->sched_init()->init_idle(). 494 * 495 * We include PREEMPT_ACTIVE to avoid cond_resched() from working 496 * before the scheduler is active -- see should_resched(). 497 */ 498#define INIT_PREEMPT_COUNT (1 + PREEMPT_ACTIVE) 499 500/** 501 * struct thread_group_cputimer - thread group interval timer counts 502 * @cputime: thread group interval timers. 503 * @running: non-zero when there are timers running and 504 * @cputime receives updates. 505 * @lock: lock for fields in this struct. 506 * 507 * This structure contains the version of task_cputime, above, that is 508 * used for thread group CPU timer calculations. 509 */ 510struct thread_group_cputimer { 511 struct task_cputime cputime; 512 int running; 513 raw_spinlock_t lock; 514}; 515 516#include <linux/rwsem.h> 517struct autogroup; 518 519/* 520 * NOTE! "signal_struct" does not have its own 521 * locking, because a shared signal_struct always 522 * implies a shared sighand_struct, so locking 523 * sighand_struct is always a proper superset of 524 * the locking of signal_struct. 525 */ 526struct signal_struct { 527 atomic_t sigcnt; 528 atomic_t live; 529 int nr_threads; 530 531 wait_queue_head_t wait_chldexit; /* for wait4() */ 532 533 /* current thread group signal load-balancing target: */ 534 struct task_struct *curr_target; 535 536 /* shared signal handling: */ 537 struct sigpending shared_pending; 538 539 /* thread group exit support */ 540 int group_exit_code; 541 /* overloaded: 542 * - notify group_exit_task when ->count is equal to notify_count 543 * - everyone except group_exit_task is stopped during signal delivery 544 * of fatal signals, group_exit_task processes the signal. 545 */ 546 int notify_count; 547 struct task_struct *group_exit_task; 548 549 /* thread group stop support, overloads group_exit_code too */ 550 int group_stop_count; 551 unsigned int flags; /* see SIGNAL_* flags below */ 552 553 /* POSIX.1b Interval Timers */ 554 struct list_head posix_timers; 555 556 /* ITIMER_REAL timer for the process */ 557 struct hrtimer real_timer; 558 struct pid *leader_pid; 559 ktime_t it_real_incr; 560 561 /* 562 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use 563 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these 564 * values are defined to 0 and 1 respectively 565 */ 566 struct cpu_itimer it[2]; 567 568 /* 569 * Thread group totals for process CPU timers. 570 * See thread_group_cputimer(), et al, for details. 571 */ 572 struct thread_group_cputimer cputimer; 573 574 /* Earliest-expiration cache. */ 575 struct task_cputime cputime_expires; 576 577 struct list_head cpu_timers[3]; 578 579 struct pid *tty_old_pgrp; 580 581 /* boolean value for session group leader */ 582 int leader; 583 584 struct tty_struct *tty; /* NULL if no tty */ 585 586#ifdef CONFIG_SCHED_AUTOGROUP 587 struct autogroup *autogroup; 588#endif 589 /* 590 * Cumulative resource counters for dead threads in the group, 591 * and for reaped dead child processes forked by this group. 592 * Live threads maintain their own counters and add to these 593 * in __exit_signal, except for the group leader. 
594 */ 595 cputime_t utime, stime, cutime, cstime; 596 cputime_t gtime; 597 cputime_t cgtime; 598#ifndef CONFIG_VIRT_CPU_ACCOUNTING 599 cputime_t prev_utime, prev_stime; 600#endif 601 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; 602 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; 603 unsigned long inblock, oublock, cinblock, coublock; 604 unsigned long maxrss, cmaxrss; 605 struct task_io_accounting ioac; 606 607 /* 608 * Cumulative ns of schedule CPU time fo dead threads in the 609 * group, not including a zombie group leader, (This only differs 610 * from jiffies_to_ns(utime + stime) if sched_clock uses something 611 * other than jiffies.) 612 */ 613 unsigned long long sum_sched_runtime; 614 615 /* 616 * We don't bother to synchronize most readers of this at all, 617 * because there is no reader checking a limit that actually needs 618 * to get both rlim_cur and rlim_max atomically, and either one 619 * alone is a single word that can safely be read normally. 620 * getrlimit/setrlimit use task_lock(current->group_leader) to 621 * protect this instead of the siglock, because they really 622 * have no need to disable irqs. 623 */ 624 struct rlimit rlim[RLIM_NLIMITS]; 625 626#ifdef CONFIG_BSD_PROCESS_ACCT 627 struct pacct_struct pacct; /* per-process accounting information */ 628#endif 629#ifdef CONFIG_TASKSTATS 630 struct taskstats *stats; 631#endif 632#ifdef CONFIG_AUDIT 633 unsigned audit_tty; 634 struct tty_audit_buf *tty_audit_buf; 635#endif 636#ifdef CONFIG_CGROUPS 637 /* 638 * group_rwsem prevents new tasks from entering the threadgroup and 639 * member tasks from exiting,a more specifically, setting of 640 * PF_EXITING. fork and exit paths are protected with this rwsem 641 * using threadgroup_change_begin/end(). Users which require 642 * threadgroup to remain stable should use threadgroup_[un]lock() 643 * which also takes care of exec path. Currently, cgroup is the 644 * only user. 645 */ 646 struct rw_semaphore group_rwsem; 647#endif 648 649 int oom_adj; /* OOM kill score adjustment (bit shift) */ 650 int oom_score_adj; /* OOM kill score adjustment */ 651 int oom_score_adj_min; /* OOM kill score adjustment minimum value. 652 * Only settable by CAP_SYS_RESOURCE. */ 653 654 struct mutex cred_guard_mutex; /* guard against foreign influences on 655 * credential calculations 656 * (notably. ptrace) */ 657}; 658 659/* Context switch must be unlocked if interrupts are to be enabled */ 660#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 661# define __ARCH_WANT_UNLOCKED_CTXSW 662#endif 663 664/* 665 * Bits in flags field of signal_struct. 666 */ 667#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ 668#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ 669#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ 670/* 671 * Pending notifications to parent. 672 */ 673#define SIGNAL_CLD_STOPPED 0x00000010 674#define SIGNAL_CLD_CONTINUED 0x00000020 675#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) 676 677#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ 678 679/* If true, all threads except ->group_exit_task have pending SIGKILL */ 680static inline int signal_group_exit(const struct signal_struct *sig) 681{ 682 return (sig->flags & SIGNAL_GROUP_EXIT) || 683 (sig->group_exit_task != NULL); 684} 685 686/* 687 * Some day this will be a full-fledged user tracking system.. 
688 */ 689struct user_struct { 690 atomic_t __count; /* reference count */ 691 atomic_t processes; /* How many processes does this user have? */ 692 atomic_t files; /* How many open files does this user have? */ 693 atomic_t sigpending; /* How many pending signals does this user have? */ 694#ifdef CONFIG_INOTIFY_USER 695 atomic_t inotify_watches; /* How many inotify watches does this user have? */ 696 atomic_t inotify_devs; /* How many inotify devs does this user have opened? */ 697#endif 698#ifdef CONFIG_FANOTIFY 699 atomic_t fanotify_listeners; 700#endif 701#ifdef CONFIG_EPOLL 702 atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ 703#endif 704#ifdef CONFIG_POSIX_MQUEUE 705 /* protected by mq_lock */ 706 unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ 707#endif 708 unsigned long locked_shm; /* How many pages of mlocked shm ? */ 709 710#ifdef CONFIG_KEYS 711 struct key *uid_keyring; /* UID specific keyring */ 712 struct key *session_keyring; /* UID's default session keyring */ 713#endif 714 715 /* Hash table maintenance information */ 716 struct hlist_node uidhash_node; 717 uid_t uid; 718 struct user_namespace *user_ns; 719 720#ifdef CONFIG_PERF_EVENTS 721 atomic_long_t locked_vm; 722#endif 723}; 724 725extern int uids_sysfs_init(void); 726 727extern struct user_struct *find_user(uid_t); 728 729extern struct user_struct root_user; 730#define INIT_USER (&root_user) 731 732 733struct backing_dev_info; 734struct reclaim_state; 735 736#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 737struct sched_info { 738 /* cumulative counters */ 739 unsigned long pcount; /* # of times run on this cpu */ 740 unsigned long long run_delay; /* time spent waiting on a runqueue */ 741 742 /* timestamps */ 743 unsigned long long last_arrival,/* when we last ran on a cpu */ 744 last_queued; /* when we were last queued to run */ 745}; 746#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ 747 748#ifdef CONFIG_TASK_DELAY_ACCT 749struct task_delay_info { 750 spinlock_t lock; 751 unsigned int flags; /* Private per-task flags */ 752 753 /* For each stat XXX, add following, aligned appropriately 754 * 755 * struct timespec XXX_start, XXX_end; 756 * u64 XXX_delay; 757 * u32 XXX_count; 758 * 759 * Atomicity of updates to XXX_delay, XXX_count protected by 760 * single lock above (split into XXX_lock if contention is an issue). 761 */ 762 763 /* 764 * XXX_count is incremented on every XXX operation, the delay 765 * associated with the operation is added to XXX_delay. 766 * XXX_delay contains the accumulated delay time in nanoseconds. 
767 */ 768 struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */ 769 u64 blkio_delay; /* wait for sync block io completion */ 770 u64 swapin_delay; /* wait for swapin block io completion */ 771 u32 blkio_count; /* total count of the number of sync block */ 772 /* io operations performed */ 773 u32 swapin_count; /* total count of the number of swapin block */ 774 /* io operations performed */ 775 776 struct timespec freepages_start, freepages_end; 777 u64 freepages_delay; /* wait for memory reclaim */ 778 u32 freepages_count; /* total count of memory reclaim */ 779}; 780#endif /* CONFIG_TASK_DELAY_ACCT */ 781 782static inline int sched_info_on(void) 783{ 784#ifdef CONFIG_SCHEDSTATS 785 return 1; 786#elif defined(CONFIG_TASK_DELAY_ACCT) 787 extern int delayacct_on; 788 return delayacct_on; 789#else 790 return 0; 791#endif 792} 793 794enum cpu_idle_type { 795 CPU_IDLE, 796 CPU_NOT_IDLE, 797 CPU_NEWLY_IDLE, 798 CPU_MAX_IDLE_TYPES 799}; 800 801/* 802 * Increase resolution of nice-level calculations for 64-bit architectures. 803 * The extra resolution improves shares distribution and load balancing of 804 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup 805 * hierarchies, especially on larger systems. This is not a user-visible change 806 * and does not change the user-interface for setting shares/weights. 807 * 808 * We increase resolution only if we have enough bits to allow this increased 809 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution 810 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the 811 * increased costs. 812 */ 813#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */ 814# define SCHED_LOAD_RESOLUTION 10 815# define scale_load(w) ((w) << SCHED_LOAD_RESOLUTION) 816# define scale_load_down(w) ((w) >> SCHED_LOAD_RESOLUTION) 817#else 818# define SCHED_LOAD_RESOLUTION 0 819# define scale_load(w) (w) 820# define scale_load_down(w) (w) 821#endif 822 823#define SCHED_LOAD_SHIFT (10 + SCHED_LOAD_RESOLUTION) 824#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT) 825 826/* 827 * Increase resolution of cpu_power calculations 828 */ 829#define SCHED_POWER_SHIFT 10 830#define SCHED_POWER_SCALE (1L << SCHED_POWER_SHIFT) 831 832/* 833 * sched-domains (multiprocessor balancing) declarations: 834 */ 835#ifdef CONFIG_SMP 836#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. 
*/ 837#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ 838#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ 839#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ 840#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ 841#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ 842#define SD_PREFER_LOCAL 0x0040 /* Prefer to keep tasks local to this domain */ 843#define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */ 844#define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */ 845#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ 846#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ 847#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ 848#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ 849#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ 850 851enum powersavings_balance_level { 852 POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */ 853 POWERSAVINGS_BALANCE_BASIC, /* Fill one thread/core/package 854 * first for long running threads 855 */ 856 POWERSAVINGS_BALANCE_WAKEUP, /* Also bias task wakeups to semi-idle 857 * cpu package for power savings 858 */ 859 MAX_POWERSAVINGS_BALANCE_LEVELS 860}; 861 862extern int sched_mc_power_savings, sched_smt_power_savings; 863 864static inline int sd_balance_for_mc_power(void) 865{ 866 if (sched_smt_power_savings) 867 return SD_POWERSAVINGS_BALANCE; 868 869 if (!sched_mc_power_savings) 870 return SD_PREFER_SIBLING; 871 872 return 0; 873} 874 875static inline int sd_balance_for_package_power(void) 876{ 877 if (sched_mc_power_savings | sched_smt_power_savings) 878 return SD_POWERSAVINGS_BALANCE; 879 880 return SD_PREFER_SIBLING; 881} 882 883extern int __weak arch_sd_sibiling_asym_packing(void); 884 885/* 886 * Optimise SD flags for power savings: 887 * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings. 888 * Keep default SD flags if sched_{smt,mc}_power_saving=0 889 */ 890 891static inline int sd_power_saving_flags(void) 892{ 893 if (sched_mc_power_savings | sched_smt_power_savings) 894 return SD_BALANCE_NEWIDLE; 895 896 return 0; 897} 898 899struct sched_group_power { 900 atomic_t ref; 901 /* 902 * CPU power of this group, SCHED_LOAD_SCALE being max power for a 903 * single CPU. 904 */ 905 unsigned int power, power_orig; 906}; 907 908struct sched_group { 909 struct sched_group *next; /* Must be a circular list */ 910 atomic_t ref; 911 912 unsigned int group_weight; 913 struct sched_group_power *sgp; 914 915 /* 916 * The CPUs this group covers. 917 * 918 * NOTE: this field is variable length. 
(Allocated dynamically 919 * by attaching extra space to the end of the structure, 920 * depending on how many CPUs the kernel has booted up with) 921 */ 922 unsigned long cpumask[0]; 923}; 924 925static inline struct cpumask *sched_group_cpus(struct sched_group *sg) 926{ 927 return to_cpumask(sg->cpumask); 928} 929 930struct sched_domain_attr { 931 int relax_domain_level; 932}; 933 934#define SD_ATTR_INIT (struct sched_domain_attr) { \ 935 .relax_domain_level = -1, \ 936} 937 938extern int sched_domain_level_max; 939 940struct sched_domain { 941 /* These fields must be setup */ 942 struct sched_domain *parent; /* top domain must be null terminated */ 943 struct sched_domain *child; /* bottom domain must be null terminated */ 944 struct sched_group *groups; /* the balancing groups of the domain */ 945 unsigned long min_interval; /* Minimum balance interval ms */ 946 unsigned long max_interval; /* Maximum balance interval ms */ 947 unsigned int busy_factor; /* less balancing by factor if busy */ 948 unsigned int imbalance_pct; /* No balance until over watermark */ 949 unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ 950 unsigned int busy_idx; 951 unsigned int idle_idx; 952 unsigned int newidle_idx; 953 unsigned int wake_idx; 954 unsigned int forkexec_idx; 955 unsigned int smt_gain; 956 int flags; /* See SD_* */ 957 int level; 958 959 /* Runtime fields. */ 960 unsigned long last_balance; /* init to jiffies. units in jiffies */ 961 unsigned int balance_interval; /* initialise to 1. units in ms. */ 962 unsigned int nr_balance_failed; /* initialise to 0 */ 963 964 u64 last_update; 965 966#ifdef CONFIG_SCHEDSTATS 967 /* load_balance() stats */ 968 unsigned int lb_count[CPU_MAX_IDLE_TYPES]; 969 unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; 970 unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; 971 unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; 972 unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; 973 unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; 974 unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; 975 unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; 976 977 /* Active load balancing */ 978 unsigned int alb_count; 979 unsigned int alb_failed; 980 unsigned int alb_pushed; 981 982 /* SD_BALANCE_EXEC stats */ 983 unsigned int sbe_count; 984 unsigned int sbe_balanced; 985 unsigned int sbe_pushed; 986 987 /* SD_BALANCE_FORK stats */ 988 unsigned int sbf_count; 989 unsigned int sbf_balanced; 990 unsigned int sbf_pushed; 991 992 /* try_to_wake_up() stats */ 993 unsigned int ttwu_wake_remote; 994 unsigned int ttwu_move_affine; 995 unsigned int ttwu_move_balance; 996#endif 997#ifdef CONFIG_SCHED_DEBUG 998 char *name; 999#endif 1000 union { 1001 void *private; /* used during construction */ 1002 struct rcu_head rcu; /* used during destruction */ 1003 }; 1004 1005 unsigned int span_weight; 1006 /* 1007 * Span of all CPUs in this domain. 1008 * 1009 * NOTE: this field is variable length. (Allocated dynamically 1010 * by attaching extra space to the end of the structure, 1011 * depending on how many CPUs the kernel has booted up with) 1012 */ 1013 unsigned long span[0]; 1014}; 1015 1016static inline struct cpumask *sched_domain_span(struct sched_domain *sd) 1017{ 1018 return to_cpumask(sd->span); 1019} 1020 1021extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 1022 struct sched_domain_attr *dattr_new); 1023 1024/* Allocate an array of sched domains, for partition_sched_domains(). 
*/ 1025cpumask_var_t *alloc_sched_domains(unsigned int ndoms); 1026void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); 1027 1028/* Test a flag in parent sched domain */ 1029static inline int test_sd_parent(struct sched_domain *sd, int flag) 1030{ 1031 if (sd->parent && (sd->parent->flags & flag)) 1032 return 1; 1033 1034 return 0; 1035} 1036 1037unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu); 1038unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu); 1039 1040#else /* CONFIG_SMP */ 1041 1042struct sched_domain_attr; 1043 1044static inline void 1045partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 1046 struct sched_domain_attr *dattr_new) 1047{ 1048} 1049#endif /* !CONFIG_SMP */ 1050 1051 1052struct io_context; /* See blkdev.h */ 1053 1054 1055#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK 1056extern void prefetch_stack(struct task_struct *t); 1057#else 1058static inline void prefetch_stack(struct task_struct *t) { } 1059#endif 1060 1061struct audit_context; /* See audit.c */ 1062struct mempolicy; 1063struct pipe_inode_info; 1064struct uts_namespace; 1065 1066struct rq; 1067struct sched_domain; 1068 1069/* 1070 * wake flags 1071 */ 1072#define WF_SYNC 0x01 /* waker goes to sleep after wakup */ 1073#define WF_FORK 0x02 /* child wakeup after fork */ 1074#define WF_MIGRATED 0x04 /* internal use, task got migrated */ 1075 1076#define ENQUEUE_WAKEUP 1 1077#define ENQUEUE_HEAD 2 1078#ifdef CONFIG_SMP 1079#define ENQUEUE_WAKING 4 /* sched_class::task_waking was called */ 1080#else 1081#define ENQUEUE_WAKING 0 1082#endif 1083 1084#define DEQUEUE_SLEEP 1 1085 1086struct sched_class { 1087 const struct sched_class *next; 1088 1089 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); 1090 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); 1091 void (*yield_task) (struct rq *rq); 1092 bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt); 1093 1094 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); 1095 1096 struct task_struct * (*pick_next_task) (struct rq *rq); 1097 void (*put_prev_task) (struct rq *rq, struct task_struct *p); 1098 1099#ifdef CONFIG_SMP 1100 int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags); 1101 1102 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); 1103 void (*post_schedule) (struct rq *this_rq); 1104 void (*task_waking) (struct task_struct *task); 1105 void (*task_woken) (struct rq *this_rq, struct task_struct *task); 1106 1107 void (*set_cpus_allowed)(struct task_struct *p, 1108 const struct cpumask *newmask); 1109 1110 void (*rq_online)(struct rq *rq); 1111 void (*rq_offline)(struct rq *rq); 1112#endif 1113 1114 void (*set_curr_task) (struct rq *rq); 1115 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); 1116 void (*task_fork) (struct task_struct *p); 1117 1118 void (*switched_from) (struct rq *this_rq, struct task_struct *task); 1119 void (*switched_to) (struct rq *this_rq, struct task_struct *task); 1120 void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 1121 int oldprio); 1122 1123 unsigned int (*get_rr_interval) (struct rq *rq, 1124 struct task_struct *task); 1125 1126#ifdef CONFIG_FAIR_GROUP_SCHED 1127 void (*task_move_group) (struct task_struct *p, int on_rq); 1128#endif 1129}; 1130 1131struct load_weight { 1132 unsigned long weight, inv_weight; 1133}; 1134 1135#ifdef CONFIG_SCHEDSTATS 1136struct sched_statistics { 1137 u64 wait_start; 1138 u64 
wait_max; 1139 u64 wait_count; 1140 u64 wait_sum; 1141 u64 iowait_count; 1142 u64 iowait_sum; 1143 1144 u64 sleep_start; 1145 u64 sleep_max; 1146 s64 sum_sleep_runtime; 1147 1148 u64 block_start; 1149 u64 block_max; 1150 u64 exec_max; 1151 u64 slice_max; 1152 1153 u64 nr_migrations_cold; 1154 u64 nr_failed_migrations_affine; 1155 u64 nr_failed_migrations_running; 1156 u64 nr_failed_migrations_hot; 1157 u64 nr_forced_migrations; 1158 1159 u64 nr_wakeups; 1160 u64 nr_wakeups_sync; 1161 u64 nr_wakeups_migrate; 1162 u64 nr_wakeups_local; 1163 u64 nr_wakeups_remote; 1164 u64 nr_wakeups_affine; 1165 u64 nr_wakeups_affine_attempts; 1166 u64 nr_wakeups_passive; 1167 u64 nr_wakeups_idle; 1168}; 1169#endif 1170 1171struct sched_entity { 1172 struct load_weight load; /* for load-balancing */ 1173 struct rb_node run_node; 1174 struct list_head group_node; 1175 unsigned int on_rq; 1176 1177 u64 exec_start; 1178 u64 sum_exec_runtime; 1179 u64 vruntime; 1180 u64 prev_sum_exec_runtime; 1181 1182 u64 nr_migrations; 1183 1184#ifdef CONFIG_SCHEDSTATS 1185 struct sched_statistics statistics; 1186#endif 1187 1188#ifdef CONFIG_FAIR_GROUP_SCHED 1189 struct sched_entity *parent; 1190 /* rq on which this entity is (to be) queued: */ 1191 struct cfs_rq *cfs_rq; 1192 /* rq "owned" by this entity/group: */ 1193 struct cfs_rq *my_q; 1194#endif 1195}; 1196 1197struct sched_rt_entity { 1198 struct list_head run_list; 1199 unsigned long timeout; 1200 unsigned int time_slice; 1201 int nr_cpus_allowed; 1202 1203 struct sched_rt_entity *back; 1204#ifdef CONFIG_RT_GROUP_SCHED 1205 struct sched_rt_entity *parent; 1206 /* rq on which this entity is (to be) queued: */ 1207 struct rt_rq *rt_rq; 1208 /* rq "owned" by this entity/group: */ 1209 struct rt_rq *my_q; 1210#endif 1211}; 1212 1213struct rcu_node; 1214 1215enum perf_event_task_context { 1216 perf_invalid_context = -1, 1217 perf_hw_context = 0, 1218 perf_sw_context, 1219 perf_nr_task_contexts, 1220}; 1221 1222struct task_struct { 1223 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ 1224 void *stack; 1225 atomic_t usage; 1226 unsigned int flags; /* per process flags, defined below */ 1227 unsigned int ptrace; 1228 1229#ifdef CONFIG_SMP 1230 struct llist_node wake_entry; 1231 int on_cpu; 1232#endif 1233 int on_rq; 1234 1235 int prio, static_prio, normal_prio; 1236 unsigned int rt_priority; 1237 const struct sched_class *sched_class; 1238 struct sched_entity se; 1239 struct sched_rt_entity rt; 1240 1241#ifdef CONFIG_PREEMPT_NOTIFIERS 1242 /* list of struct preempt_notifier: */ 1243 struct hlist_head preempt_notifiers; 1244#endif 1245 1246 /* 1247 * fpu_counter contains the number of consecutive context switches 1248 * that the FPU is used. If this is over a threshold, the lazy fpu 1249 * saving becomes unlazy to save the trap. 
This is an unsigned char 1250 * so that after 256 times the counter wraps and the behavior turns 1251 * lazy again; this to deal with bursty apps that only use FPU for 1252 * a short time 1253 */ 1254 unsigned char fpu_counter; 1255#ifdef CONFIG_BLK_DEV_IO_TRACE 1256 unsigned int btrace_seq; 1257#endif 1258 1259 unsigned int policy; 1260 cpumask_t cpus_allowed; 1261 1262#ifdef CONFIG_PREEMPT_RCU 1263 int rcu_read_lock_nesting; 1264 char rcu_read_unlock_special; 1265 struct list_head rcu_node_entry; 1266#endif /* #ifdef CONFIG_PREEMPT_RCU */ 1267#ifdef CONFIG_TREE_PREEMPT_RCU 1268 struct rcu_node *rcu_blocked_node; 1269#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 1270#ifdef CONFIG_RCU_BOOST 1271 struct rt_mutex *rcu_boost_mutex; 1272#endif /* #ifdef CONFIG_RCU_BOOST */ 1273 1274#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 1275 struct sched_info sched_info; 1276#endif 1277 1278 struct list_head tasks; 1279#ifdef CONFIG_SMP 1280 struct plist_node pushable_tasks; 1281#endif 1282 1283 struct mm_struct *mm, *active_mm; 1284#ifdef CONFIG_COMPAT_BRK 1285 unsigned brk_randomized:1; 1286#endif 1287#if defined(SPLIT_RSS_COUNTING) 1288 struct task_rss_stat rss_stat; 1289#endif 1290/* task state */ 1291 int exit_state; 1292 int exit_code, exit_signal; 1293 int pdeath_signal; /* The signal sent when the parent dies */ 1294 unsigned int jobctl; /* JOBCTL_*, siglock protected */ 1295 /* ??? */ 1296 unsigned int personality; 1297 unsigned did_exec:1; 1298 unsigned in_execve:1; /* Tell the LSMs that the process is doing an 1299 * execve */ 1300 unsigned in_iowait:1; 1301 1302 1303 /* Revert to default priority/policy when forking */ 1304 unsigned sched_reset_on_fork:1; 1305 unsigned sched_contributes_to_load:1; 1306 1307 pid_t pid; 1308 pid_t tgid; 1309 1310#ifdef CONFIG_CC_STACKPROTECTOR 1311 /* Canary value for the -fstack-protector gcc feature */ 1312 unsigned long stack_canary; 1313#endif 1314 1315 /* 1316 * pointers to (original) parent process, youngest child, younger sibling, 1317 * older sibling, respectively. (p->father can be replaced with 1318 * p->real_parent->pid) 1319 */ 1320 struct task_struct *real_parent; /* real parent process */ 1321 struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */ 1322 /* 1323 * children/sibling forms the list of my natural children 1324 */ 1325 struct list_head children; /* list of my children */ 1326 struct list_head sibling; /* linkage in my parent's children list */ 1327 struct task_struct *group_leader; /* threadgroup leader */ 1328 1329 /* 1330 * ptraced is the list of tasks this task is using ptrace on. 1331 * This includes both natural children and PTRACE_ATTACH targets. 1332 * p->ptrace_entry is p's link on the p->parent->ptraced list. 1333 */ 1334 struct list_head ptraced; 1335 struct list_head ptrace_entry; 1336 1337 /* PID/PID hash table linkage. 
*/ 1338 struct pid_link pids[PIDTYPE_MAX]; 1339 struct list_head thread_group; 1340 1341 struct completion *vfork_done; /* for vfork() */ 1342 int __user *set_child_tid; /* CLONE_CHILD_SETTID */ 1343 int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ 1344 1345 cputime_t utime, stime, utimescaled, stimescaled; 1346 cputime_t gtime; 1347#ifndef CONFIG_VIRT_CPU_ACCOUNTING 1348 cputime_t prev_utime, prev_stime; 1349#endif 1350 unsigned long nvcsw, nivcsw; /* context switch counts */ 1351 struct timespec start_time; /* monotonic time */ 1352 struct timespec real_start_time; /* boot based time */ 1353/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ 1354 unsigned long min_flt, maj_flt; 1355 1356 struct task_cputime cputime_expires; 1357 struct list_head cpu_timers[3]; 1358 1359/* process credentials */ 1360 const struct cred __rcu *real_cred; /* objective and real subjective task 1361 * credentials (COW) */ 1362 const struct cred __rcu *cred; /* effective (overridable) subjective task 1363 * credentials (COW) */ 1364 struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */ 1365 1366 char comm[TASK_COMM_LEN]; /* executable name excluding path 1367 - access with [gs]et_task_comm (which lock 1368 it with task_lock()) 1369 - initialized normally by setup_new_exec */ 1370/* file system info */ 1371 int link_count, total_link_count; 1372#ifdef CONFIG_SYSVIPC 1373/* ipc stuff */ 1374 struct sysv_sem sysvsem; 1375#endif 1376#ifdef CONFIG_DETECT_HUNG_TASK 1377/* hung task detection */ 1378 unsigned long last_switch_count; 1379#endif 1380/* CPU-specific state of this task */ 1381 struct thread_struct thread; 1382/* filesystem information */ 1383 struct fs_struct *fs; 1384/* open file information */ 1385 struct files_struct *files; 1386/* namespaces */ 1387 struct nsproxy *nsproxy; 1388/* signal handlers */ 1389 struct signal_struct *signal; 1390 struct sighand_struct *sighand; 1391 1392 sigset_t blocked, real_blocked; 1393 sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ 1394 struct sigpending pending; 1395 1396 unsigned long sas_ss_sp; 1397 size_t sas_ss_size; 1398 int (*notifier)(void *priv); 1399 void *notifier_data; 1400 sigset_t *notifier_mask; 1401 struct audit_context *audit_context; 1402#ifdef CONFIG_AUDITSYSCALL 1403 uid_t loginuid; 1404 unsigned int sessionid; 1405#endif 1406 seccomp_t seccomp; 1407 1408/* Thread group tracking */ 1409 u32 parent_exec_id; 1410 u32 self_exec_id; 1411/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, 1412 * mempolicy */ 1413 spinlock_t alloc_lock; 1414 1415#ifdef CONFIG_GENERIC_HARDIRQS 1416 /* IRQ handler threads */ 1417 struct irqaction *irqaction; 1418#endif 1419 1420 /* Protection of the PI data structures: */ 1421 raw_spinlock_t pi_lock; 1422 1423#ifdef CONFIG_RT_MUTEXES 1424 /* PI waiters blocked on a rt_mutex held by this task */ 1425 struct plist_head pi_waiters; 1426 /* Deadlock detection and priority inheritance handling */ 1427 struct rt_mutex_waiter *pi_blocked_on; 1428#endif 1429 1430#ifdef CONFIG_DEBUG_MUTEXES 1431 /* mutex deadlock detection */ 1432 struct mutex_waiter *blocked_on; 1433#endif 1434#ifdef CONFIG_TRACE_IRQFLAGS 1435 unsigned int irq_events; 1436 unsigned long hardirq_enable_ip; 1437 unsigned long hardirq_disable_ip; 1438 unsigned int hardirq_enable_event; 1439 unsigned int hardirq_disable_event; 1440 int hardirqs_enabled; 1441 int hardirq_context; 1442 unsigned long softirq_disable_ip; 1443 unsigned long 
softirq_enable_ip; 1444 unsigned int softirq_disable_event; 1445 unsigned int softirq_enable_event; 1446 int softirqs_enabled; 1447 int softirq_context; 1448#endif 1449#ifdef CONFIG_LOCKDEP 1450# define MAX_LOCK_DEPTH 48UL 1451 u64 curr_chain_key; 1452 int lockdep_depth; 1453 unsigned int lockdep_recursion; 1454 struct held_lock held_locks[MAX_LOCK_DEPTH]; 1455 gfp_t lockdep_reclaim_gfp; 1456#endif 1457 1458/* journalling filesystem info */ 1459 void *journal_info; 1460 1461/* stacked block device info */ 1462 struct bio_list *bio_list; 1463 1464#ifdef CONFIG_BLOCK 1465/* stack plugging */ 1466 struct blk_plug *plug; 1467#endif 1468 1469/* VM state */ 1470 struct reclaim_state *reclaim_state; 1471 1472 struct backing_dev_info *backing_dev_info; 1473 1474 struct io_context *io_context; 1475 1476 unsigned long ptrace_message; 1477 siginfo_t *last_siginfo; /* For ptrace use. */ 1478 struct task_io_accounting ioac; 1479#if defined(CONFIG_TASK_XACCT) 1480 u64 acct_rss_mem1; /* accumulated rss usage */ 1481 u64 acct_vm_mem1; /* accumulated virtual memory usage */ 1482 cputime_t acct_timexpd; /* stime + utime since last update */ 1483#endif 1484#ifdef CONFIG_CPUSETS 1485 nodemask_t mems_allowed; /* Protected by alloc_lock */ 1486 int mems_allowed_change_disable; 1487 int cpuset_mem_spread_rotor; 1488 int cpuset_slab_spread_rotor; 1489#endif 1490#ifdef CONFIG_CGROUPS 1491 /* Control Group info protected by css_set_lock */ 1492 struct css_set __rcu *cgroups; 1493 /* cg_list protected by css_set_lock and tsk->alloc_lock */ 1494 struct list_head cg_list; 1495#endif 1496#ifdef CONFIG_FUTEX 1497 struct robust_list_head __user *robust_list; 1498#ifdef CONFIG_COMPAT 1499 struct compat_robust_list_head __user *compat_robust_list; 1500#endif 1501 struct list_head pi_state_list; 1502 struct futex_pi_state *pi_state_cache; 1503#endif 1504#ifdef CONFIG_PERF_EVENTS 1505 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; 1506 struct mutex perf_event_mutex; 1507 struct list_head perf_event_list; 1508#endif 1509#ifdef CONFIG_NUMA 1510 struct mempolicy *mempolicy; /* Protected by alloc_lock */ 1511 short il_next; 1512 short pref_node_fork; 1513#endif 1514 struct rcu_head rcu; 1515 1516 /* 1517 * cache last used pipe for splice 1518 */ 1519 struct pipe_inode_info *splice_pipe; 1520#ifdef CONFIG_TASK_DELAY_ACCT 1521 struct task_delay_info *delays; 1522#endif 1523#ifdef CONFIG_FAULT_INJECTION 1524 int make_it_fail; 1525#endif 1526 /* 1527 * when (nr_dirtied >= nr_dirtied_pause), it's time to call 1528 * balance_dirty_pages() for some dirty throttling pause 1529 */ 1530 int nr_dirtied; 1531 int nr_dirtied_pause; 1532 1533#ifdef CONFIG_LATENCYTOP 1534 int latency_record_count; 1535 struct latency_record latency_record[LT_SAVECOUNT]; 1536#endif 1537 /* 1538 * time slack values; these are used to round up poll() and 1539 * select() etc timeout values. These are in nanoseconds. 1540 */ 1541 unsigned long timer_slack_ns; 1542 unsigned long default_timer_slack_ns; 1543 1544 struct list_head *scm_work_list; 1545#ifdef CONFIG_FUNCTION_GRAPH_TRACER 1546 /* Index of current stored address in ret_stack */ 1547 int curr_ret_stack; 1548 /* Stack of return addresses for return function tracing */ 1549 struct ftrace_ret_stack *ret_stack; 1550 /* time stamp for last schedule */ 1551 unsigned long long ftrace_timestamp; 1552 /* 1553 * Number of functions that haven't been traced 1554 * because of depth overrun. 
1555 */ 1556 atomic_t trace_overrun; 1557 /* Pause for the tracing */ 1558 atomic_t tracing_graph_pause; 1559#endif 1560#ifdef CONFIG_TRACING 1561 /* state flags for use by tracers */ 1562 unsigned long trace; 1563 /* bitmask and counter of trace recursion */ 1564 unsigned long trace_recursion; 1565#endif /* CONFIG_TRACING */ 1566#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */ 1567 struct memcg_batch_info { 1568 int do_batch; /* incremented when batch uncharge started */ 1569 struct mem_cgroup *memcg; /* target memcg of uncharge */ 1570 unsigned long nr_pages; /* uncharged usage */ 1571 unsigned long memsw_nr_pages; /* uncharged mem+swap usage */ 1572 } memcg_batch; 1573#endif 1574#ifdef CONFIG_HAVE_HW_BREAKPOINT 1575 atomic_t ptrace_bp_refcnt; 1576#endif 1577}; 1578 1579/* Future-safe accessor for struct task_struct's cpus_allowed. */ 1580#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) 1581 1582/* 1583 * Priority of a process goes from 0..MAX_PRIO-1, valid RT 1584 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH 1585 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority 1586 * values are inverted: lower p->prio value means higher priority. 1587 * 1588 * The MAX_USER_RT_PRIO value allows the actual maximum 1589 * RT priority to be separate from the value exported to 1590 * user-space. This allows kernel threads to set their 1591 * priority to a value higher than any user task. Note: 1592 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. 1593 */ 1594 1595#define MAX_USER_RT_PRIO 100 1596#define MAX_RT_PRIO MAX_USER_RT_PRIO 1597 1598#define MAX_PRIO (MAX_RT_PRIO + 40) 1599#define DEFAULT_PRIO (MAX_RT_PRIO + 20) 1600 1601static inline int rt_prio(int prio) 1602{ 1603 if (unlikely(prio < MAX_RT_PRIO)) 1604 return 1; 1605 return 0; 1606} 1607 1608static inline int rt_task(struct task_struct *p) 1609{ 1610 return rt_prio(p->prio); 1611} 1612 1613static inline struct pid *task_pid(struct task_struct *task) 1614{ 1615 return task->pids[PIDTYPE_PID].pid; 1616} 1617 1618static inline struct pid *task_tgid(struct task_struct *task) 1619{ 1620 return task->group_leader->pids[PIDTYPE_PID].pid; 1621} 1622 1623/* 1624 * Without tasklist or rcu lock it is not safe to dereference 1625 * the result of task_pgrp/task_session even if task == current, 1626 * we can race with another thread doing sys_setsid/sys_setpgid. 1627 */ 1628static inline struct pid *task_pgrp(struct task_struct *task) 1629{ 1630 return task->group_leader->pids[PIDTYPE_PGID].pid; 1631} 1632 1633static inline struct pid *task_session(struct task_struct *task) 1634{ 1635 return task->group_leader->pids[PIDTYPE_SID].pid; 1636} 1637 1638struct pid_namespace; 1639 1640/* 1641 * the helpers to get the task's different pids as they are seen 1642 * from various namespaces 1643 * 1644 * task_xid_nr() : global id, i.e. the id seen from the init namespace; 1645 * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of 1646 * current. 
1647 * task_xid_nr_ns() : id seen from the ns specified; 1648 * 1649 * set_task_vxid() : assigns a virtual id to a task; 1650 * 1651 * see also pid_nr() etc in include/linux/pid.h 1652 */ 1653pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, 1654 struct pid_namespace *ns); 1655 1656static inline pid_t task_pid_nr(struct task_struct *tsk) 1657{ 1658 return tsk->pid; 1659} 1660 1661static inline pid_t task_pid_nr_ns(struct task_struct *tsk, 1662 struct pid_namespace *ns) 1663{ 1664 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); 1665} 1666 1667static inline pid_t task_pid_vnr(struct task_struct *tsk) 1668{ 1669 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); 1670} 1671 1672 1673static inline pid_t task_tgid_nr(struct task_struct *tsk) 1674{ 1675 return tsk->tgid; 1676} 1677 1678pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); 1679 1680static inline pid_t task_tgid_vnr(struct task_struct *tsk) 1681{ 1682 return pid_vnr(task_tgid(tsk)); 1683} 1684 1685 1686static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, 1687 struct pid_namespace *ns) 1688{ 1689 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); 1690} 1691 1692static inline pid_t task_pgrp_vnr(struct task_struct *tsk) 1693{ 1694 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); 1695} 1696 1697 1698static inline pid_t task_session_nr_ns(struct task_struct *tsk, 1699 struct pid_namespace *ns) 1700{ 1701 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); 1702} 1703 1704static inline pid_t task_session_vnr(struct task_struct *tsk) 1705{ 1706 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); 1707} 1708 1709/* obsolete, do not use */ 1710static inline pid_t task_pgrp_nr(struct task_struct *tsk) 1711{ 1712 return task_pgrp_nr_ns(tsk, &init_pid_ns); 1713} 1714 1715/** 1716 * pid_alive - check that a task structure is not stale 1717 * @p: Task structure to be checked. 1718 * 1719 * Test if a process is not yet dead (at most zombie state) 1720 * If pid_alive fails, then pointers within the task structure 1721 * can be stale and must not be dereferenced. 1722 */ 1723static inline int pid_alive(struct task_struct *p) 1724{ 1725 return p->pids[PIDTYPE_PID].pid != NULL; 1726} 1727 1728/** 1729 * is_global_init - check if a task structure is init 1730 * @tsk: Task structure to be checked. 1731 * 1732 * Check if a task structure is the first user space task the kernel created. 1733 */ 1734static inline int is_global_init(struct task_struct *tsk) 1735{ 1736 return tsk->pid == 1; 1737} 1738 1739/* 1740 * is_container_init: 1741 * check whether in the task is init in its own pid namespace. 
1742 */ 1743extern int is_container_init(struct task_struct *tsk); 1744 1745extern struct pid *cad_pid; 1746 1747extern void free_task(struct task_struct *tsk); 1748#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) 1749 1750extern void __put_task_struct(struct task_struct *t); 1751 1752static inline void put_task_struct(struct task_struct *t) 1753{ 1754 if (atomic_dec_and_test(&t->usage)) 1755 __put_task_struct(t); 1756} 1757 1758extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st); 1759extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st); 1760 1761/* 1762 * Per process flags 1763 */ 1764#define PF_STARTING 0x00000002 /* being created */ 1765#define PF_EXITING 0x00000004 /* getting shut down */ 1766#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ 1767#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ 1768#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ 1769#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ 1770#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ 1771#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ 1772#define PF_DUMPCORE 0x00000200 /* dumped core */ 1773#define PF_SIGNALED 0x00000400 /* killed by a signal */ 1774#define PF_MEMALLOC 0x00000800 /* Allocating memory */ 1775#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ 1776#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ 1777#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ 1778#define PF_FROZEN 0x00010000 /* frozen for system suspend */ 1779#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ 1780#define PF_KSWAPD 0x00040000 /* I am kswapd */ 1781#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ 1782#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ 1783#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ 1784#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ 1785#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ 1786#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ 1787#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */ 1788#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 1789#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ 1790#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ 1791#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ 1792 1793/* 1794 * Only the _current_ task can read/write to tsk->flags, but other 1795 * tasks can access tsk->flags in readonly mode for example 1796 * with tsk_used_math (like during threaded core dumping). 1797 * There is however an exception to this rule during ptrace 1798 * or during fork: the ptracer task is allowed to write to the 1799 * child->flags of its traced child (same goes for fork, the parent 1800 * can write to the child->flags), because we're guaranteed the 1801 * child is not running and in turn not changing child->flags 1802 * at the same time the parent does it. 
1803 */ 1804#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) 1805#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) 1806#define clear_used_math() clear_stopped_child_used_math(current) 1807#define set_used_math() set_stopped_child_used_math(current) 1808#define conditional_stopped_child_used_math(condition, child) \ 1809 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) 1810#define conditional_used_math(condition) \ 1811 conditional_stopped_child_used_math(condition, current) 1812#define copy_to_stopped_child_used_math(child) \ 1813 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) 1814/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ 1815#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 1816#define used_math() tsk_used_math(current) 1817 1818/* 1819 * task->jobctl flags 1820 */ 1821#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ 1822 1823#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ 1824#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ 1825#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ 1826#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ 1827#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ 1828#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ 1829#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ 1830 1831#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT) 1832#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT) 1833#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT) 1834#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT) 1835#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT) 1836#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT) 1837#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT) 1838 1839#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) 1840#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) 1841 1842extern bool task_set_jobctl_pending(struct task_struct *task, 1843 unsigned int mask); 1844extern void task_clear_jobctl_trapping(struct task_struct *task); 1845extern void task_clear_jobctl_pending(struct task_struct *task, 1846 unsigned int mask); 1847 1848#ifdef CONFIG_PREEMPT_RCU 1849 1850#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ 1851#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */ 1852#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. 
*/ 1853 1854static inline void rcu_copy_process(struct task_struct *p) 1855{ 1856 p->rcu_read_lock_nesting = 0; 1857 p->rcu_read_unlock_special = 0; 1858#ifdef CONFIG_TREE_PREEMPT_RCU 1859 p->rcu_blocked_node = NULL; 1860#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 1861#ifdef CONFIG_RCU_BOOST 1862 p->rcu_boost_mutex = NULL; 1863#endif /* #ifdef CONFIG_RCU_BOOST */ 1864 INIT_LIST_HEAD(&p->rcu_node_entry); 1865} 1866 1867#else 1868 1869static inline void rcu_copy_process(struct task_struct *p) 1870{ 1871} 1872 1873#endif 1874 1875#ifdef CONFIG_SMP 1876extern void do_set_cpus_allowed(struct task_struct *p, 1877 const struct cpumask *new_mask); 1878 1879extern int set_cpus_allowed_ptr(struct task_struct *p, 1880 const struct cpumask *new_mask); 1881#else 1882static inline void do_set_cpus_allowed(struct task_struct *p, 1883 const struct cpumask *new_mask) 1884{ 1885} 1886static inline int set_cpus_allowed_ptr(struct task_struct *p, 1887 const struct cpumask *new_mask) 1888{ 1889 if (!cpumask_test_cpu(0, new_mask)) 1890 return -EINVAL; 1891 return 0; 1892} 1893#endif 1894 1895#ifndef CONFIG_CPUMASK_OFFSTACK 1896static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) 1897{ 1898 return set_cpus_allowed_ptr(p, &new_mask); 1899} 1900#endif 1901 1902/* 1903 * Do not use outside of architecture code which knows its limitations. 1904 * 1905 * sched_clock() makes no promise of monotonicity or bounded drift between 1906 * CPUs; using it (which you should not) requires disabling IRQs. 1907 * 1908 * Please use one of the three interfaces below. 1909 */ 1910extern unsigned long long notrace sched_clock(void); 1911/* 1912 * See the comment in kernel/sched_clock.c 1913 */ 1914extern u64 cpu_clock(int cpu); 1915extern u64 local_clock(void); 1916extern u64 sched_clock_cpu(int cpu); 1917 1918 1919extern void sched_clock_init(void); 1920 1921#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 1922static inline void sched_clock_tick(void) 1923{ 1924} 1925 1926static inline void sched_clock_idle_sleep_event(void) 1927{ 1928} 1929 1930static inline void sched_clock_idle_wakeup_event(u64 delta_ns) 1931{ 1932} 1933#else 1934/* 1935 * Architectures can set this to 1 if they have specified 1936 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, 1937 * but then during bootup it turns out that sched_clock() 1938 * is reliable after all: 1939 */ 1940extern int sched_clock_stable; 1941 1942extern void sched_clock_tick(void); 1943extern void sched_clock_idle_sleep_event(void); 1944extern void sched_clock_idle_wakeup_event(u64 delta_ns); 1945#endif 1946 1947#ifdef CONFIG_IRQ_TIME_ACCOUNTING 1948/* 1949 * An interface for runtime opt-in to IRQ time accounting based on sched_clock. 1950 * The opt-in is explicit to avoid a performance penalty on systems with 1951 * slow sched_clocks.
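 *
 * Illustrative sketch only (architecture code, not this header): an arch that
 * has verified its sched_clock() is cheap could opt in during boot, e.g.
 *
 *	if (arch_sched_clock_is_fast())		(hypothetical predicate)
 *		enable_sched_clock_irqtime();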
1952 */ 1953extern void enable_sched_clock_irqtime(void); 1954extern void disable_sched_clock_irqtime(void); 1955#else 1956static inline void enable_sched_clock_irqtime(void) {} 1957static inline void disable_sched_clock_irqtime(void) {} 1958#endif 1959 1960extern unsigned long long 1961task_sched_runtime(struct task_struct *task); 1962 1963/* sched_exec is called by processes performing an exec */ 1964#ifdef CONFIG_SMP 1965extern void sched_exec(void); 1966#else 1967#define sched_exec() {} 1968#endif 1969 1970extern void sched_clock_idle_sleep_event(void); 1971extern void sched_clock_idle_wakeup_event(u64 delta_ns); 1972 1973#ifdef CONFIG_HOTPLUG_CPU 1974extern void idle_task_exit(void); 1975#else 1976static inline void idle_task_exit(void) {} 1977#endif 1978 1979#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP) 1980extern void wake_up_idle_cpu(int cpu); 1981#else 1982static inline void wake_up_idle_cpu(int cpu) { } 1983#endif 1984 1985extern unsigned int sysctl_sched_latency; 1986extern unsigned int sysctl_sched_min_granularity; 1987extern unsigned int sysctl_sched_wakeup_granularity; 1988extern unsigned int sysctl_sched_child_runs_first; 1989 1990enum sched_tunable_scaling { 1991 SCHED_TUNABLESCALING_NONE, 1992 SCHED_TUNABLESCALING_LOG, 1993 SCHED_TUNABLESCALING_LINEAR, 1994 SCHED_TUNABLESCALING_END, 1995}; 1996extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; 1997 1998#ifdef CONFIG_SCHED_DEBUG 1999extern unsigned int sysctl_sched_migration_cost; 2000extern unsigned int sysctl_sched_nr_migrate; 2001extern unsigned int sysctl_sched_time_avg; 2002extern unsigned int sysctl_timer_migration; 2003extern unsigned int sysctl_sched_shares_window; 2004 2005int sched_proc_update_handler(struct ctl_table *table, int write, 2006 void __user *buffer, size_t *length, 2007 loff_t *ppos); 2008#endif 2009#ifdef CONFIG_SCHED_DEBUG 2010static inline unsigned int get_sysctl_timer_migration(void) 2011{ 2012 return sysctl_timer_migration; 2013} 2014#else 2015static inline unsigned int get_sysctl_timer_migration(void) 2016{ 2017 return 1; 2018} 2019#endif 2020extern unsigned int sysctl_sched_rt_period; 2021extern int sysctl_sched_rt_runtime; 2022 2023int sched_rt_handler(struct ctl_table *table, int write, 2024 void __user *buffer, size_t *lenp, 2025 loff_t *ppos); 2026 2027#ifdef CONFIG_SCHED_AUTOGROUP 2028extern unsigned int sysctl_sched_autogroup_enabled; 2029 2030extern void sched_autogroup_create_attach(struct task_struct *p); 2031extern void sched_autogroup_detach(struct task_struct *p); 2032extern void sched_autogroup_fork(struct signal_struct *sig); 2033extern void sched_autogroup_exit(struct signal_struct *sig); 2034#ifdef CONFIG_PROC_FS 2035extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); 2036extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice); 2037#endif 2038#else 2039static inline void sched_autogroup_create_attach(struct task_struct *p) { } 2040static inline void sched_autogroup_detach(struct task_struct *p) { } 2041static inline void sched_autogroup_fork(struct signal_struct *sig) { } 2042static inline void sched_autogroup_exit(struct signal_struct *sig) { } 2043#endif 2044 2045#ifdef CONFIG_CFS_BANDWIDTH 2046extern unsigned int sysctl_sched_cfs_bandwidth_slice; 2047#endif 2048 2049#ifdef CONFIG_RT_MUTEXES 2050extern int rt_mutex_getprio(struct task_struct *p); 2051extern void rt_mutex_setprio(struct task_struct *p, int prio); 2052extern void rt_mutex_adjust_pi(struct task_struct *p); 2053#else 2054static inline int 
rt_mutex_getprio(struct task_struct *p) 2055{ 2056 return p->normal_prio; 2057} 2058# define rt_mutex_adjust_pi(p) do { } while (0) 2059#endif 2060 2061extern bool yield_to(struct task_struct *p, bool preempt); 2062extern void set_user_nice(struct task_struct *p, long nice); 2063extern int task_prio(const struct task_struct *p); 2064extern int task_nice(const struct task_struct *p); 2065extern int can_nice(const struct task_struct *p, const int nice); 2066extern int task_curr(const struct task_struct *p); 2067extern int idle_cpu(int cpu); 2068extern int sched_setscheduler(struct task_struct *, int, 2069 const struct sched_param *); 2070extern int sched_setscheduler_nocheck(struct task_struct *, int, 2071 const struct sched_param *); 2072extern struct task_struct *idle_task(int cpu); 2073extern struct task_struct *curr_task(int cpu); 2074extern void set_curr_task(int cpu, struct task_struct *p); 2075 2076void yield(void); 2077 2078/* 2079 * The default (Linux) execution domain. 2080 */ 2081extern struct exec_domain default_exec_domain; 2082 2083union thread_union { 2084 struct thread_info thread_info; 2085 unsigned long stack[THREAD_SIZE/sizeof(long)]; 2086}; 2087 2088#ifndef __HAVE_ARCH_KSTACK_END 2089static inline int kstack_end(void *addr) 2090{ 2091 /* Reliable end of stack detection: 2092 * Some APM bios versions misalign the stack 2093 */ 2094 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); 2095} 2096#endif 2097 2098extern union thread_union init_thread_union; 2099extern struct task_struct init_task; 2100 2101extern struct mm_struct init_mm; 2102 2103extern struct pid_namespace init_pid_ns; 2104 2105/* 2106 * find a task by one of its numerical ids 2107 * 2108 * find_task_by_pid_ns(): 2109 * finds a task by its pid in the specified namespace 2110 * find_task_by_vpid(): 2111 * finds a task by its virtual pid 2112 * 2113 * see also find_vpid() etc in include/linux/pid.h 2114 */ 2115 2116extern struct task_struct *find_task_by_vpid(pid_t nr); 2117extern struct task_struct *find_task_by_pid_ns(pid_t nr, 2118 struct pid_namespace *ns); 2119 2120extern void __set_special_pids(struct pid *pid); 2121 2122/* per-UID process charging. 
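 *
 * An illustrative sketch (not from this header) of the reference counting
 * convention for user_struct: get_uid() takes an extra reference that a
 * matching free_uid() must drop, e.g.
 *
 *	struct user_struct *u = get_uid(some_user);	(some_user is hypothetical)
 *	... charge resources against u ...
 *	free_uid(u);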
*/ 2123extern struct user_struct * alloc_uid(struct user_namespace *, uid_t); 2124static inline struct user_struct *get_uid(struct user_struct *u) 2125{ 2126 atomic_inc(&u->__count); 2127 return u; 2128} 2129extern void free_uid(struct user_struct *); 2130extern void release_uids(struct user_namespace *ns); 2131 2132#include <asm/current.h> 2133 2134extern void xtime_update(unsigned long ticks); 2135 2136extern int wake_up_state(struct task_struct *tsk, unsigned int state); 2137extern int wake_up_process(struct task_struct *tsk); 2138extern void wake_up_new_task(struct task_struct *tsk); 2139#ifdef CONFIG_SMP 2140 extern void kick_process(struct task_struct *tsk); 2141#else 2142 static inline void kick_process(struct task_struct *tsk) { } 2143#endif 2144extern void sched_fork(struct task_struct *p); 2145extern void sched_dead(struct task_struct *p); 2146 2147extern void proc_caches_init(void); 2148extern void flush_signals(struct task_struct *); 2149extern void __flush_signals(struct task_struct *); 2150extern void ignore_signals(struct task_struct *); 2151extern void flush_signal_handlers(struct task_struct *, int force_default); 2152extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); 2153 2154static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) 2155{ 2156 unsigned long flags; 2157 int ret; 2158 2159 spin_lock_irqsave(&tsk->sighand->siglock, flags); 2160 ret = dequeue_signal(tsk, mask, info); 2161 spin_unlock_irqrestore(&tsk->sighand->siglock, flags); 2162 2163 return ret; 2164} 2165 2166extern void block_all_signals(int (*notifier)(void *priv), void *priv, 2167 sigset_t *mask); 2168extern void unblock_all_signals(void); 2169extern void release_task(struct task_struct * p); 2170extern int send_sig_info(int, struct siginfo *, struct task_struct *); 2171extern int force_sigsegv(int, struct task_struct *); 2172extern int force_sig_info(int, struct siginfo *, struct task_struct *); 2173extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); 2174extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); 2175extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, 2176 const struct cred *, u32); 2177extern int kill_pgrp(struct pid *pid, int sig, int priv); 2178extern int kill_pid(struct pid *pid, int sig, int priv); 2179extern int kill_proc_info(int, struct siginfo *, pid_t); 2180extern __must_check bool do_notify_parent(struct task_struct *, int); 2181extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); 2182extern void force_sig(int, struct task_struct *); 2183extern int send_sig(int, struct task_struct *, int); 2184extern int zap_other_threads(struct task_struct *p); 2185extern struct sigqueue *sigqueue_alloc(void); 2186extern void sigqueue_free(struct sigqueue *); 2187extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); 2188extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); 2189extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long); 2190 2191static inline int kill_cad_pid(int sig, int priv) 2192{ 2193 return kill_pid(cad_pid, sig, priv); 2194} 2195 2196/* These can be the second arg to send_sig_info/send_group_sig_info. */ 2197#define SEND_SIG_NOINFO ((struct siginfo *) 0) 2198#define SEND_SIG_PRIV ((struct siginfo *) 1) 2199#define SEND_SIG_FORCED ((struct siginfo *) 2) 2200 2201/* 2202 * True if we are on the alternate signal stack. 
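 *
 * For example (illustrative only), architecture signal code typically reports
 * the state of the alternate stack via sas_ss_flags() when filling in a
 * ucontext:
 *
 *	uc->uc_stack.ss_flags = sas_ss_flags(user_sp);	(user_sp is hypothetical)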
2203 */ 2204static inline int on_sig_stack(unsigned long sp) 2205{ 2206#ifdef CONFIG_STACK_GROWSUP 2207 return sp >= current->sas_ss_sp && 2208 sp - current->sas_ss_sp < current->sas_ss_size; 2209#else 2210 return sp > current->sas_ss_sp && 2211 sp - current->sas_ss_sp <= current->sas_ss_size; 2212#endif 2213} 2214 2215static inline int sas_ss_flags(unsigned long sp) 2216{ 2217 return (current->sas_ss_size == 0 ? SS_DISABLE 2218 : on_sig_stack(sp) ? SS_ONSTACK : 0); 2219} 2220 2221/* 2222 * Routines for handling mm_structs 2223 */ 2224extern struct mm_struct * mm_alloc(void); 2225 2226/* mmdrop drops the mm and the page tables */ 2227extern void __mmdrop(struct mm_struct *); 2228static inline void mmdrop(struct mm_struct * mm) 2229{ 2230 if (unlikely(atomic_dec_and_test(&mm->mm_count))) 2231 __mmdrop(mm); 2232} 2233 2234/* mmput gets rid of the mappings and all user-space */ 2235extern void mmput(struct mm_struct *); 2236/* Grab a reference to a task's mm, if it is not already going away */ 2237extern struct mm_struct *get_task_mm(struct task_struct *task); 2238/* Remove the current task's stale references to the old mm_struct */ 2239extern void mm_release(struct task_struct *, struct mm_struct *); 2240/* Allocate a new mm structure and copy contents from tsk->mm */ 2241extern struct mm_struct *dup_mm(struct task_struct *tsk); 2242 2243extern int copy_thread(unsigned long, unsigned long, unsigned long, 2244 struct task_struct *, struct pt_regs *); 2245extern void flush_thread(void); 2246extern void exit_thread(void); 2247 2248extern void exit_files(struct task_struct *); 2249extern void __cleanup_sighand(struct sighand_struct *); 2250 2251extern void exit_itimers(struct signal_struct *); 2252extern void flush_itimer_signals(void); 2253 2254extern NORET_TYPE void do_group_exit(int); 2255 2256extern void daemonize(const char *, ...); 2257extern int allow_signal(int); 2258extern int disallow_signal(int); 2259 2260extern int do_execve(const char *, 2261 const char __user * const __user *, 2262 const char __user * const __user *, struct pt_regs *); 2263extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); 2264struct task_struct *fork_idle(int); 2265 2266extern void set_task_comm(struct task_struct *tsk, char *from); 2267extern char *get_task_comm(char *to, struct task_struct *tsk); 2268 2269#ifdef CONFIG_SMP 2270void scheduler_ipi(void); 2271extern unsigned long wait_task_inactive(struct task_struct *, long match_state); 2272#else 2273static inline void scheduler_ipi(void) { } 2274static inline unsigned long wait_task_inactive(struct task_struct *p, 2275 long match_state) 2276{ 2277 return 1; 2278} 2279#endif 2280 2281#define next_task(p) \ 2282 list_entry_rcu((p)->tasks.next, struct task_struct, tasks) 2283 2284#define for_each_process(p) \ 2285 for (p = &init_task ; (p = next_task(p)) != &init_task ; ) 2286 2287extern bool current_is_single_threaded(void); 2288 2289/* 2290 * Careful: do_each_thread/while_each_thread is a double loop so 2291 * 'break' will not work as expected - use goto instead.
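 *
 * A minimal illustrative sketch (callers conventionally hold
 * read_lock(&tasklist_lock) or rcu_read_lock() around the walk):
 *
 *	struct task_struct *g, *t;
 *
 *	do_each_thread(g, t) {
 *		if (some_condition(t))		(hypothetical predicate)
 *			goto found;
 *	} while_each_thread(g, t);
 *	...
 * found:
 *	...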
2292 */ 2293#define do_each_thread(g, t) \ 2294 for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do 2295 2296#define while_each_thread(g, t) \ 2297 while ((t = next_thread(t)) != g) 2298 2299static inline int get_nr_threads(struct task_struct *tsk) 2300{ 2301 return tsk->signal->nr_threads; 2302} 2303 2304static inline bool thread_group_leader(struct task_struct *p) 2305{ 2306 return p->exit_signal >= 0; 2307} 2308 2309/* Due to the insanities of de_thread it is possible for a process 2310 * to have the pid of the thread group leader without actually being 2311 * the thread group leader. For iteration through the pids in proc 2312 * all we care about is that we have a task with the appropriate 2313 * pid; we don't actually care if we have the right task. 2314 */ 2315static inline int has_group_leader_pid(struct task_struct *p) 2316{ 2317 return p->pid == p->tgid; 2318} 2319 2320static inline 2321int same_thread_group(struct task_struct *p1, struct task_struct *p2) 2322{ 2323 return p1->tgid == p2->tgid; 2324} 2325 2326static inline struct task_struct *next_thread(const struct task_struct *p) 2327{ 2328 return list_entry_rcu(p->thread_group.next, 2329 struct task_struct, thread_group); 2330} 2331 2332static inline int thread_group_empty(struct task_struct *p) 2333{ 2334 return list_empty(&p->thread_group); 2335} 2336 2337#define delay_group_leader(p) \ 2338 (thread_group_leader(p) && !thread_group_empty(p)) 2339 2340/* 2341 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring 2342 * subscriptions and synchronises with wait4(). Also used in procfs. Also 2343 * pins the final release of task.io_context. Also protects ->cpuset and 2344 * ->cgroup.subsys[]. 2345 * 2346 * Nests both inside and outside of read_lock(&tasklist_lock). 2347 * It must not be nested with write_lock_irq(&tasklist_lock), 2348 * neither inside nor outside. 2349 */ 2350static inline void task_lock(struct task_struct *p) 2351{ 2352 spin_lock(&p->alloc_lock); 2353} 2354 2355static inline void task_unlock(struct task_struct *p) 2356{ 2357 spin_unlock(&p->alloc_lock); 2358} 2359 2360extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, 2361 unsigned long *flags); 2362 2363#define lock_task_sighand(tsk, flags) \ 2364({ struct sighand_struct *__ss; \ 2365 __cond_lock(&(tsk)->sighand->siglock, \ 2366 (__ss = __lock_task_sighand(tsk, flags))); \ 2367 __ss; \ 2368}) 2369 2370static inline void unlock_task_sighand(struct task_struct *tsk, 2371 unsigned long *flags) 2372{ 2373 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); 2374} 2375 2376#ifdef CONFIG_CGROUPS 2377static inline void threadgroup_change_begin(struct task_struct *tsk) 2378{ 2379 down_read(&tsk->signal->group_rwsem); 2380} 2381static inline void threadgroup_change_end(struct task_struct *tsk) 2382{ 2383 up_read(&tsk->signal->group_rwsem); 2384} 2385 2386/** 2387 * threadgroup_lock - lock threadgroup 2388 * @tsk: member task of the threadgroup to lock 2389 * 2390 * Lock the threadgroup @tsk belongs to. No new task is allowed to enter 2391 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or 2392 * perform exec. This is useful for cases where the threadgroup needs to 2393 * stay stable across blockable operations. 2394 * 2395 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for 2396 * synchronization. While held, no new task will be added to threadgroup 2397 * and no existing live task will have its PF_EXITING set.
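 *
 * An illustrative sketch (not part of this header) of the intended pairing,
 * with a write-side critical section over the whole group:
 *
 *	threadgroup_lock(tsk);
 *	... walk or modify tsk's thread group ...
 *	threadgroup_unlock(tsk);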
2398 * 2399 * During exec, a task goes and puts its thread group through unusual 2400 * changes. After de-threading, exclusive access is assumed to resources 2401 * which are usually shared by tasks in the same group - e.g. sighand may 2402 * be replaced with a new one. Also, the exec'ing task takes over group 2403 * leader role including its pid. Exclude these changes while locked by 2404 * grabbing cred_guard_mutex which is used to synchronize exec path. 2405 */ 2406static inline void threadgroup_lock(struct task_struct *tsk) 2407{ 2408 /* 2409 * exec uses exit for de-threading nesting group_rwsem inside 2410 * cred_guard_mutex. Grab cred_guard_mutex first. 2411 */ 2412 mutex_lock(&tsk->signal->cred_guard_mutex); 2413 down_write(&tsk->signal->group_rwsem); 2414} 2415 2416/** 2417 * threadgroup_unlock - unlock threadgroup 2418 * @tsk: member task of the threadgroup to unlock 2419 * 2420 * Reverse threadgroup_lock(). 2421 */ 2422static inline void threadgroup_unlock(struct task_struct *tsk) 2423{ 2424 up_write(&tsk->signal->group_rwsem); 2425 mutex_unlock(&tsk->signal->cred_guard_mutex); 2426} 2427#else 2428static inline void threadgroup_change_begin(struct task_struct *tsk) {} 2429static inline void threadgroup_change_end(struct task_struct *tsk) {} 2430static inline void threadgroup_lock(struct task_struct *tsk) {} 2431static inline void threadgroup_unlock(struct task_struct *tsk) {} 2432#endif 2433 2434#ifndef __HAVE_THREAD_FUNCTIONS 2435 2436#define task_thread_info(task) ((struct thread_info *)(task)->stack) 2437#define task_stack_page(task) ((task)->stack) 2438 2439static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) 2440{ 2441 *task_thread_info(p) = *task_thread_info(org); 2442 task_thread_info(p)->task = p; 2443} 2444 2445static inline unsigned long *end_of_stack(struct task_struct *p) 2446{ 2447 return (unsigned long *)(task_thread_info(p) + 1); 2448} 2449 2450#endif 2451 2452static inline int object_is_on_stack(void *obj) 2453{ 2454 void *stack = task_stack_page(current); 2455 2456 return (obj >= stack) && (obj < (stack + THREAD_SIZE)); 2457} 2458 2459extern void thread_info_cache_init(void); 2460 2461#ifdef CONFIG_DEBUG_STACK_USAGE 2462static inline unsigned long stack_not_used(struct task_struct *p) 2463{ 2464 unsigned long *n = end_of_stack(p); 2465 2466 do { /* Skip over canary */ 2467 n++; 2468 } while (!*n); 2469 2470 return (unsigned long)n - (unsigned long)end_of_stack(p); 2471} 2472#endif 2473 2474/* set thread flags in other task's structures 2475 * - see asm/thread_info.h for TIF_xxxx flags available 2476 */ 2477static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) 2478{ 2479 set_ti_thread_flag(task_thread_info(tsk), flag); 2480} 2481 2482static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) 2483{ 2484 clear_ti_thread_flag(task_thread_info(tsk), flag); 2485} 2486 2487static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) 2488{ 2489 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); 2490} 2491 2492static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) 2493{ 2494 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); 2495} 2496 2497static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) 2498{ 2499 return test_ti_thread_flag(task_thread_info(tsk), flag); 2500} 2501 2502static inline void set_tsk_need_resched(struct task_struct *tsk) 2503{ 2504 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 2505} 2506 
2507static inline void clear_tsk_need_resched(struct task_struct *tsk) 2508{ 2509 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 2510} 2511 2512static inline int test_tsk_need_resched(struct task_struct *tsk) 2513{ 2514 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); 2515} 2516 2517static inline int restart_syscall(void) 2518{ 2519 set_tsk_thread_flag(current, TIF_SIGPENDING); 2520 return -ERESTARTNOINTR; 2521} 2522 2523static inline int signal_pending(struct task_struct *p) 2524{ 2525 return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); 2526} 2527 2528static inline int __fatal_signal_pending(struct task_struct *p) 2529{ 2530 return unlikely(sigismember(&p->pending.signal, SIGKILL)); 2531} 2532 2533static inline int fatal_signal_pending(struct task_struct *p) 2534{ 2535 return signal_pending(p) && __fatal_signal_pending(p); 2536} 2537 2538static inline int signal_pending_state(long state, struct task_struct *p) 2539{ 2540 if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) 2541 return 0; 2542 if (!signal_pending(p)) 2543 return 0; 2544 2545 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); 2546} 2547 2548static inline int need_resched(void) 2549{ 2550 return unlikely(test_thread_flag(TIF_NEED_RESCHED)); 2551} 2552 2553/* 2554 * cond_resched() and cond_resched_lock(): latency reduction via 2555 * explicit rescheduling in places that are safe. The return 2556 * value indicates whether a reschedule was done in fact. 2557 * cond_resched_lock() will drop the spinlock before scheduling, 2558 * cond_resched_softirq() will enable bhs before scheduling. 2559 */ 2560extern int _cond_resched(void); 2561 2562#define cond_resched() ({ \ 2563 __might_sleep(__FILE__, __LINE__, 0); \ 2564 _cond_resched(); \ 2565}) 2566 2567extern int __cond_resched_lock(spinlock_t *lock); 2568 2569#ifdef CONFIG_PREEMPT_COUNT 2570#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET 2571#else 2572#define PREEMPT_LOCK_OFFSET 0 2573#endif 2574 2575#define cond_resched_lock(lock) ({ \ 2576 __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ 2577 __cond_resched_lock(lock); \ 2578}) 2579 2580extern int __cond_resched_softirq(void); 2581 2582#define cond_resched_softirq() ({ \ 2583 __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ 2584 __cond_resched_softirq(); \ 2585}) 2586 2587/* 2588 * Does a critical section need to be broken due to another 2589 * task waiting?: (technically does not depend on CONFIG_PREEMPT, 2590 * but a general need for low latency) 2591 */ 2592static inline int spin_needbreak(spinlock_t *lock) 2593{ 2594#ifdef CONFIG_PREEMPT 2595 return spin_is_contended(lock); 2596#else 2597 return 0; 2598#endif 2599} 2600 2601/* 2602 * Thread group CPU time accounting. 2603 */ 2604void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); 2605void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); 2606 2607static inline void thread_group_cputime_init(struct signal_struct *sig) 2608{ 2609 raw_spin_lock_init(&sig->cputimer.lock); 2610} 2611 2612/* 2613 * Reevaluate whether the task has signals pending delivery. 2614 * Wake the task if so. 2615 * This is required every time the blocked sigset_t changes. 2616 * callers must hold sighand->siglock. 2617 */ 2618extern void recalc_sigpending_and_wake(struct task_struct *t); 2619extern void recalc_sigpending(void); 2620 2621extern void signal_wake_up(struct task_struct *t, int resume_stopped); 2622 2623/* 2624 * Wrappers for p->thread_info->cpu access. No-op on UP. 
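 *
 * For example (illustrative only), diagnostic code can report the CPU a task
 * is currently assigned to:
 *
 *	pr_debug("%s/%d is on CPU%u\n", p->comm, task_pid_nr(p), task_cpu(p));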
2625 */ 2626#ifdef CONFIG_SMP 2627 2628static inline unsigned int task_cpu(const struct task_struct *p) 2629{ 2630 return task_thread_info(p)->cpu; 2631} 2632 2633extern void set_task_cpu(struct task_struct *p, unsigned int cpu); 2634 2635#else 2636 2637static inline unsigned int task_cpu(const struct task_struct *p) 2638{ 2639 return 0; 2640} 2641 2642static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) 2643{ 2644} 2645 2646#endif /* CONFIG_SMP */ 2647 2648extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); 2649extern long sched_getaffinity(pid_t pid, struct cpumask *mask); 2650 2651extern void normalize_rt_tasks(void); 2652 2653#ifdef CONFIG_CGROUP_SCHED 2654 2655extern struct task_group root_task_group; 2656 2657extern struct task_group *sched_create_group(struct task_group *parent); 2658extern void sched_destroy_group(struct task_group *tg); 2659extern void sched_move_task(struct task_struct *tsk); 2660#ifdef CONFIG_FAIR_GROUP_SCHED 2661extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); 2662extern unsigned long sched_group_shares(struct task_group *tg); 2663#endif 2664#ifdef CONFIG_RT_GROUP_SCHED 2665extern int sched_group_set_rt_runtime(struct task_group *tg, 2666 long rt_runtime_us); 2667extern long sched_group_rt_runtime(struct task_group *tg); 2668extern int sched_group_set_rt_period(struct task_group *tg, 2669 long rt_period_us); 2670extern long sched_group_rt_period(struct task_group *tg); 2671extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); 2672#endif 2673#endif 2674 2675extern int task_can_switch_user(struct user_struct *up, 2676 struct task_struct *tsk); 2677 2678#ifdef CONFIG_TASK_XACCT 2679static inline void add_rchar(struct task_struct *tsk, ssize_t amt) 2680{ 2681 tsk->ioac.rchar += amt; 2682} 2683 2684static inline void add_wchar(struct task_struct *tsk, ssize_t amt) 2685{ 2686 tsk->ioac.wchar += amt; 2687} 2688 2689static inline void inc_syscr(struct task_struct *tsk) 2690{ 2691 tsk->ioac.syscr++; 2692} 2693 2694static inline void inc_syscw(struct task_struct *tsk) 2695{ 2696 tsk->ioac.syscw++; 2697} 2698#else 2699static inline void add_rchar(struct task_struct *tsk, ssize_t amt) 2700{ 2701} 2702 2703static inline void add_wchar(struct task_struct *tsk, ssize_t amt) 2704{ 2705} 2706 2707static inline void inc_syscr(struct task_struct *tsk) 2708{ 2709} 2710 2711static inline void inc_syscw(struct task_struct *tsk) 2712{ 2713} 2714#endif 2715 2716#ifndef TASK_SIZE_OF 2717#define TASK_SIZE_OF(tsk) TASK_SIZE 2718#endif 2719 2720#ifdef CONFIG_MM_OWNER 2721extern void mm_update_next_owner(struct mm_struct *mm); 2722extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); 2723#else 2724static inline void mm_update_next_owner(struct mm_struct *mm) 2725{ 2726} 2727 2728static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p) 2729{ 2730} 2731#endif /* CONFIG_MM_OWNER */ 2732 2733static inline unsigned long task_rlimit(const struct task_struct *tsk, 2734 unsigned int limit) 2735{ 2736 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur); 2737} 2738 2739static inline unsigned long task_rlimit_max(const struct task_struct *tsk, 2740 unsigned int limit) 2741{ 2742 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max); 2743} 2744 2745static inline unsigned long rlimit(unsigned int limit) 2746{ 2747 return task_rlimit(current, limit); 2748} 2749 2750static inline unsigned long rlimit_max(unsigned int limit) 2751{ 2752 return task_rlimit_max(current, 
limit); 2753} 2754 2755#endif /* __KERNEL__ */ 2756 2757#endif 2758