sched.h revision 4714d1d32d97239fb5ae3e10521d3f133a899b66
1#ifndef _LINUX_SCHED_H
2#define _LINUX_SCHED_H
3
4/*
5 * cloning flags:
6 */
7#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
8#define CLONE_VM	0x00000100	/* set if VM shared between processes */
9#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
10#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
11#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
12#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
13#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
14#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
15#define CLONE_THREAD	0x00010000	/* Same thread group? */
16#define CLONE_NEWNS	0x00020000	/* New namespace group? */
17#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
18#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
19#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
20#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
21#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
22#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
23#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
24/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
25   and is now available for re-use. */
26#define CLONE_NEWUTS		0x04000000	/* New utsname group? */
27#define CLONE_NEWIPC		0x08000000	/* New ipcs */
28#define CLONE_NEWUSER		0x10000000	/* New user namespace */
29#define CLONE_NEWPID		0x20000000	/* New pid namespace */
30#define CLONE_NEWNET		0x40000000	/* New network namespace */
31#define CLONE_IO		0x80000000	/* Clone io context */
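/*
 * Editor's illustrative sketch (not part of the original header): the
 * CLONE_* values above are ORed together into the flags argument of
 * clone().  Assuming a glibc environment and a properly allocated child
 * stack (clone() takes the topmost address on stack-grows-down
 * architectures), a child sharing the address space and file tables
 * could be created roughly as follows:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <signal.h>
 *
 *	static int child_fn(void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	static int spawn_sharing_child(void *child_stack_top)
 *	{
 *		int flags = CLONE_VM | CLONE_FS | CLONE_FILES |
 *			    CLONE_SIGHAND | SIGCHLD;
 *
 *		return clone(child_fn, child_stack_top, flags, NULL);
 *	}
 */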
32
33/*
34 * Scheduling policies
35 */
36#define SCHED_NORMAL		0
37#define SCHED_FIFO		1
38#define SCHED_RR		2
39#define SCHED_BATCH		3
40/* SCHED_ISO: reserved but not implemented yet */
41#define SCHED_IDLE		5
42/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
43#define SCHED_RESET_ON_FORK     0x40000000
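/*
 * Editor's illustrative sketch (not part of the original header):
 * SCHED_RESET_ON_FORK is ORed into the policy value, e.g. from userspace
 * via sched_setscheduler(), so that children of a realtime task fall
 * back to SCHED_NORMAL instead of inheriting SCHED_FIFO.  Assuming a
 * libc that exposes SCHED_RESET_ON_FORK:
 *
 *	#include <sched.h>
 *
 *	static int make_fifo_no_inherit(pid_t pid)
 *	{
 *		struct sched_param sp = { .sched_priority = 10 };
 *
 *		return sched_setscheduler(pid,
 *				SCHED_FIFO | SCHED_RESET_ON_FORK, &sp);
 *	}
 */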
44
45#ifdef __KERNEL__
46
47struct sched_param {
48	int sched_priority;
49};
50
51#include <asm/param.h>	/* for HZ */
52
53#include <linux/capability.h>
54#include <linux/threads.h>
55#include <linux/kernel.h>
56#include <linux/types.h>
57#include <linux/timex.h>
58#include <linux/jiffies.h>
59#include <linux/rbtree.h>
60#include <linux/thread_info.h>
61#include <linux/cpumask.h>
62#include <linux/errno.h>
63#include <linux/nodemask.h>
64#include <linux/mm_types.h>
65
66#include <asm/system.h>
67#include <asm/page.h>
68#include <asm/ptrace.h>
69#include <asm/cputime.h>
70
71#include <linux/smp.h>
72#include <linux/sem.h>
73#include <linux/signal.h>
74#include <linux/compiler.h>
75#include <linux/completion.h>
76#include <linux/pid.h>
77#include <linux/percpu.h>
78#include <linux/topology.h>
79#include <linux/proportions.h>
80#include <linux/seccomp.h>
81#include <linux/rcupdate.h>
82#include <linux/rculist.h>
83#include <linux/rtmutex.h>
84
85#include <linux/time.h>
86#include <linux/param.h>
87#include <linux/resource.h>
88#include <linux/timer.h>
89#include <linux/hrtimer.h>
90#include <linux/task_io_accounting.h>
91#include <linux/latencytop.h>
92#include <linux/cred.h>
93
94#include <asm/processor.h>
95
96struct exec_domain;
97struct futex_pi_state;
98struct robust_list_head;
99struct bio_list;
100struct fs_struct;
101struct perf_event_context;
102struct blk_plug;
103
104/*
105 * List of flags we want to share for kernel threads,
106 * if only because kernel threads do not use them anyway.
107 */
108#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
109
110/*
111 * These are the constants used to fake the fixed-point load-average
112 * counting. Some notes:
113 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
114 *    a load-average precision of 10 bits integer + 11 bits fractional
115 *  - if you want to count load-averages more often, you need more
116 *    precision, or rounding will get you. With 2-second counting freq,
117 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
118 *    11 bit fractions.
119 */
120extern unsigned long avenrun[];		/* Load averages */
121extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
122
123#define FSHIFT		11		/* nr of bits of precision */
124#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
125#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
126#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
127#define EXP_5		2014		/* 1/exp(5sec/5min) */
128#define EXP_15		2037		/* 1/exp(5sec/15min) */
129
130#define CALC_LOAD(load,exp,n) \
131	load *= exp; \
132	load += n*(FIXED_1-exp); \
133	load >>= FSHIFT;
134
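/*
 * Worked example (editor's sketch, not part of the original header):
 * CALC_LOAD is a fixed-point exponentially weighted moving average.  The
 * caller passes n as the number of active tasks scaled by FIXED_1, so
 * with one runnable task n = 2048.  Starting from load = 0 and using
 * EXP_1, a single LOAD_FREQ update gives
 *
 *	load = (0 * 1884 + 2048 * (2048 - 1884)) >> 11 = 164
 *
 * i.e. 164/2048 ~= 0.08, which matches 1 - exp(-5s/60s).  Repeated
 * updates converge towards 2048, reported as a load average of 1.00.
 * The same arithmetic written as a plain function:
 *
 *	static unsigned long calc_load_step(unsigned long load,
 *					    unsigned long exp,
 *					    unsigned long active_fixed)
 *	{
 *		load *= exp;
 *		load += active_fixed * (FIXED_1 - exp);
 *		return load >> FSHIFT;
 *	}
 */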
135extern unsigned long total_forks;
136extern int nr_threads;
137DECLARE_PER_CPU(unsigned long, process_counts);
138extern int nr_processes(void);
139extern unsigned long nr_running(void);
140extern unsigned long nr_uninterruptible(void);
141extern unsigned long nr_iowait(void);
142extern unsigned long nr_iowait_cpu(int cpu);
143extern unsigned long this_cpu_load(void);
144
145
146extern void calc_global_load(unsigned long ticks);
147
148extern unsigned long get_parent_ip(unsigned long addr);
149
150struct seq_file;
151struct cfs_rq;
152struct task_group;
153#ifdef CONFIG_SCHED_DEBUG
154extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
155extern void proc_sched_set_task(struct task_struct *p);
156extern void
157print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
158#else
159static inline void
160proc_sched_show_task(struct task_struct *p, struct seq_file *m)
161{
162}
163static inline void proc_sched_set_task(struct task_struct *p)
164{
165}
166static inline void
167print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
168{
169}
170#endif
171
172/*
173 * Task state bitmask. NOTE! These bits are also
174 * encoded in fs/proc/array.c: get_task_state().
175 *
176 * We have two separate sets of flags: task->state
177 * is about runnability, while task->exit_state is
178 * about the task exiting. Confusing, but this way
179 * modifying one set can't modify the other one by
180 * mistake.
181 */
182#define TASK_RUNNING		0
183#define TASK_INTERRUPTIBLE	1
184#define TASK_UNINTERRUPTIBLE	2
185#define __TASK_STOPPED		4
186#define __TASK_TRACED		8
187/* in tsk->exit_state */
188#define EXIT_ZOMBIE		16
189#define EXIT_DEAD		32
190/* in tsk->state again */
191#define TASK_DEAD		64
192#define TASK_WAKEKILL		128
193#define TASK_WAKING		256
194#define TASK_STATE_MAX		512
195
196#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
197
198extern char ___assert_task_state[1 - 2*!!(
199		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
200
201/* Convenience macros for the sake of set_task_state */
202#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
203#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
204#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
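/*
 * Editor's illustrative sketch (not part of the original header):
 * TASK_KILLABLE sleeps behave like TASK_UNINTERRUPTIBLE except that a
 * fatal signal (SIGKILL) wakes the task.  Assuming a wait_queue_head_t
 * wq and a condition set by the waker:
 *
 *	if (wait_event_killable(wq, condition))
 *		return -ERESTARTSYS;
 */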
205
206/* Convenience macros for the sake of wake_up */
207#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
208#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
209
210/* get_task_state() */
211#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
212				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
213				 __TASK_TRACED)
214
215#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
216#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
217#define task_is_dead(task)	((task)->exit_state != 0)
218#define task_is_stopped_or_traced(task)	\
219			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
220#define task_contributes_to_load(task)	\
221				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
222				 (task->flags & PF_FREEZING) == 0)
223
224#define __set_task_state(tsk, state_value)		\
225	do { (tsk)->state = (state_value); } while (0)
226#define set_task_state(tsk, state_value)		\
227	set_mb((tsk)->state, (state_value))
228
229/*
230 * set_current_state() includes a barrier so that the write of current->state
231 * is correctly serialised wrt the caller's subsequent test of whether to
232 * actually sleep:
233 *
234 *	set_current_state(TASK_UNINTERRUPTIBLE);
235 *	if (do_i_need_to_sleep())
236 *		schedule();
237 *
238 * If the caller does not need such serialisation then use __set_current_state()
239 */
240#define __set_current_state(state_value)			\
241	do { current->state = (state_value); } while (0)
242#define set_current_state(state_value)		\
243	set_mb(current->state, (state_value))
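/*
 * Editor's illustrative sketch (not part of the original header): the
 * classic open-coded wait loop built on set_current_state().  Assuming
 * "condition" is a flag set by the waker, which then calls
 * wake_up_process() on the sleeping task:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	while (!condition) {
 *		schedule();
 *		set_current_state(TASK_INTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * The barrier in set_current_state() is what prevents the condition test
 * from being reordered before the state write, so a concurrent wakeup
 * cannot be missed.
 */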
244
245/* Task command name length */
246#define TASK_COMM_LEN 16
247
248#include <linux/spinlock.h>
249
250/*
251 * This serializes "schedule()" and also protects
252 * the run-queue from deletions/modifications (but
253 * _adding_ to the beginning of the run-queue has
254 * a separate lock).
255 */
256extern rwlock_t tasklist_lock;
257extern spinlock_t mmlist_lock;
258
259struct task_struct;
260
261#ifdef CONFIG_PROVE_RCU
262extern int lockdep_tasklist_lock_is_held(void);
263#endif /* #ifdef CONFIG_PROVE_RCU */
264
265extern void sched_init(void);
266extern void sched_init_smp(void);
267extern asmlinkage void schedule_tail(struct task_struct *prev);
268extern void init_idle(struct task_struct *idle, int cpu);
269extern void init_idle_bootup_task(struct task_struct *idle);
270
271extern int runqueue_is_locked(int cpu);
272
273extern cpumask_var_t nohz_cpu_mask;
274#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
275extern void select_nohz_load_balancer(int stop_tick);
276extern int get_nohz_timer_target(void);
277#else
278static inline void select_nohz_load_balancer(int stop_tick) { }
279#endif
280
281/*
282 * Only dump TASK_* tasks. (0 for all tasks)
283 */
284extern void show_state_filter(unsigned long state_filter);
285
286static inline void show_state(void)
287{
288	show_state_filter(0);
289}
290
291extern void show_regs(struct pt_regs *);
292
293/*
294 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
295 * task), SP is the stack pointer of the first frame that should be shown in the back
296 * trace (or NULL if the entire call-chain of the task should be shown).
297 */
298extern void show_stack(struct task_struct *task, unsigned long *sp);
299
300void io_schedule(void);
301long io_schedule_timeout(long timeout);
302
303extern void cpu_init (void);
304extern void trap_init(void);
305extern void update_process_times(int user);
306extern void scheduler_tick(void);
307
308extern void sched_show_task(struct task_struct *p);
309
310#ifdef CONFIG_LOCKUP_DETECTOR
311extern void touch_softlockup_watchdog(void);
312extern void touch_softlockup_watchdog_sync(void);
313extern void touch_all_softlockup_watchdogs(void);
314extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
315				  void __user *buffer,
316				  size_t *lenp, loff_t *ppos);
317extern unsigned int  softlockup_panic;
318void lockup_detector_init(void);
319#else
320static inline void touch_softlockup_watchdog(void)
321{
322}
323static inline void touch_softlockup_watchdog_sync(void)
324{
325}
326static inline void touch_all_softlockup_watchdogs(void)
327{
328}
329static inline void lockup_detector_init(void)
330{
331}
332#endif
333
334#ifdef CONFIG_DETECT_HUNG_TASK
335extern unsigned int  sysctl_hung_task_panic;
336extern unsigned long sysctl_hung_task_check_count;
337extern unsigned long sysctl_hung_task_timeout_secs;
338extern unsigned long sysctl_hung_task_warnings;
339extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
340					 void __user *buffer,
341					 size_t *lenp, loff_t *ppos);
342#else
343/* Avoid need for ifdefs elsewhere in the code */
344enum { sysctl_hung_task_timeout_secs = 0 };
345#endif
346
347/* Attach to any functions which should be ignored in wchan output. */
348#define __sched		__attribute__((__section__(".sched.text")))
349
350/* Linker adds these: start and end of __sched functions */
351extern char __sched_text_start[], __sched_text_end[];
352
353/* Is this address in the __sched functions? */
354extern int in_sched_functions(unsigned long addr);
355
356#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
357extern signed long schedule_timeout(signed long timeout);
358extern signed long schedule_timeout_interruptible(signed long timeout);
359extern signed long schedule_timeout_killable(signed long timeout);
360extern signed long schedule_timeout_uninterruptible(signed long timeout);
361asmlinkage void schedule(void);
362extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
363
364struct nsproxy;
365struct user_namespace;
366
367/*
368 * Default maximum number of active map areas, this limits the number of vmas
369 * per mm struct. Users can overwrite this number by sysctl but there is a
370 * problem.
371 *
372 * When a program's coredump is generated in ELF format, a section is created
373 * per vma. In ELF, the number of sections is stored as an unsigned short,
374 * so the section count of a coredump must not exceed 65535. Because the
375 * kernel adds some informative sections to the core image while generating
376 * the coredump, we need some margin. The number of extra sections is
377 * currently 1-3, depending on the arch, so we use 5 as a safe margin here.
378 */
379#define MAPCOUNT_ELF_CORE_MARGIN	(5)
380#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
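/*
 * Worked value (editor's note, not part of the original header): with
 * USHRT_MAX = 65535 this makes DEFAULT_MAX_MAP_COUNT = 65535 - 5 = 65530,
 * which is what /proc/sys/vm/max_map_count reports by default.
 */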
381
382extern int sysctl_max_map_count;
383
384#include <linux/aio.h>
385
386#ifdef CONFIG_MMU
387extern void arch_pick_mmap_layout(struct mm_struct *mm);
388extern unsigned long
389arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
390		       unsigned long, unsigned long);
391extern unsigned long
392arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
393			  unsigned long len, unsigned long pgoff,
394			  unsigned long flags);
395extern void arch_unmap_area(struct mm_struct *, unsigned long);
396extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
397#else
398static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
399#endif
400
401
402extern void set_dumpable(struct mm_struct *mm, int value);
403extern int get_dumpable(struct mm_struct *mm);
404
405/* mm flags */
406/* dumpable bits */
407#define MMF_DUMPABLE      0  /* core dump is permitted */
408#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
409
410#define MMF_DUMPABLE_BITS 2
411#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
412
413/* coredump filter bits */
414#define MMF_DUMP_ANON_PRIVATE	2
415#define MMF_DUMP_ANON_SHARED	3
416#define MMF_DUMP_MAPPED_PRIVATE	4
417#define MMF_DUMP_MAPPED_SHARED	5
418#define MMF_DUMP_ELF_HEADERS	6
419#define MMF_DUMP_HUGETLB_PRIVATE 7
420#define MMF_DUMP_HUGETLB_SHARED  8
421
422#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
423#define MMF_DUMP_FILTER_BITS	7
424#define MMF_DUMP_FILTER_MASK \
425	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
426#define MMF_DUMP_FILTER_DEFAULT \
427	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
428	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
429
430#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
431# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
432#else
433# define MMF_DUMP_MASK_DEFAULT_ELF	0
434#endif
435					/* leave room for more dump flags */
436#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
437#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
438
439#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
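/*
 * Worked example (editor's sketch, not part of the original header):
 * with the values above, MMF_DUMP_FILTER_MASK is ((1 << 7) - 1) << 2 =
 * 0x1fc, and MMF_DUMP_FILTER_DEFAULT is
 *
 *	(1 << 2) | (1 << 3) | (1 << 7) = 0x8c
 *
 * plus 0x40 (ELF headers) when CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is
 * set, giving 0xcc.  MMF_INIT_MASK is therefore 0x3 | 0x1fc = 0x1ff.
 * These bits, shifted down by MMF_DUMP_FILTER_SHIFT, are what userspace
 * reads and writes through /proc/<pid>/coredump_filter (default 0x23,
 * or 0x33 with ELF headers enabled).
 */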
440
441struct sighand_struct {
442	atomic_t		count;
443	struct k_sigaction	action[_NSIG];
444	spinlock_t		siglock;
445	wait_queue_head_t	signalfd_wqh;
446};
447
448struct pacct_struct {
449	int			ac_flag;
450	long			ac_exitcode;
451	unsigned long		ac_mem;
452	cputime_t		ac_utime, ac_stime;
453	unsigned long		ac_minflt, ac_majflt;
454};
455
456struct cpu_itimer {
457	cputime_t expires;
458	cputime_t incr;
459	u32 error;
460	u32 incr_error;
461};
462
463/**
464 * struct task_cputime - collected CPU time counts
465 * @utime:		time spent in user mode, in &cputime_t units
466 * @stime:		time spent in kernel mode, in &cputime_t units
467 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
468 *
469 * This structure groups together three kinds of CPU time that are
470 * tracked for threads and thread groups.  Most things considering
471 * CPU time want to group these counts together and treat all three
472 * of them in parallel.
473 */
474struct task_cputime {
475	cputime_t utime;
476	cputime_t stime;
477	unsigned long long sum_exec_runtime;
478};
479/* Alternate field names when used to cache expirations. */
480#define prof_exp	stime
481#define virt_exp	utime
482#define sched_exp	sum_exec_runtime
483
484#define INIT_CPUTIME	\
485	(struct task_cputime) {					\
486		.utime = cputime_zero,				\
487		.stime = cputime_zero,				\
488		.sum_exec_runtime = 0,				\
489	}
490
491/*
492 * Disable preemption until the scheduler is running.
493 * Reset by start_kernel()->sched_init()->init_idle().
494 *
495 * We include PREEMPT_ACTIVE to prevent cond_resched() from working
496 * before the scheduler is active -- see should_resched().
497 */
498#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)
499
500/**
501 * struct thread_group_cputimer - thread group interval timer counts
502 * @cputime:		thread group interval timers.
503 * @running:		non-zero when there are timers running and
504 * 			@cputime receives updates.
505 * @lock:		lock for fields in this struct.
506 *
507 * This structure contains the version of task_cputime, above, that is
508 * used for thread group CPU timer calculations.
509 */
510struct thread_group_cputimer {
511	struct task_cputime cputime;
512	int running;
513	spinlock_t lock;
514};
515
516#include <linux/rwsem.h>
517struct autogroup;
518
519/*
520 * NOTE! "signal_struct" does not have its own
521 * locking, because a shared signal_struct always
522 * implies a shared sighand_struct, so locking
523 * sighand_struct is always a proper superset of
524 * the locking of signal_struct.
525 */
526struct signal_struct {
527	atomic_t		sigcnt;
528	atomic_t		live;
529	int			nr_threads;
530
531	wait_queue_head_t	wait_chldexit;	/* for wait4() */
532
533	/* current thread group signal load-balancing target: */
534	struct task_struct	*curr_target;
535
536	/* shared signal handling: */
537	struct sigpending	shared_pending;
538
539	/* thread group exit support */
540	int			group_exit_code;
541	/* overloaded:
542	 * - notify group_exit_task when ->count is equal to notify_count
543	 * - everyone except group_exit_task is stopped during signal delivery
544	 *   of fatal signals, group_exit_task processes the signal.
545	 */
546	int			notify_count;
547	struct task_struct	*group_exit_task;
548
549	/* thread group stop support, overloads group_exit_code too */
550	int			group_stop_count;
551	unsigned int		flags; /* see SIGNAL_* flags below */
552
553	/* POSIX.1b Interval Timers */
554	struct list_head posix_timers;
555
556	/* ITIMER_REAL timer for the process */
557	struct hrtimer real_timer;
558	struct pid *leader_pid;
559	ktime_t it_real_incr;
560
561	/*
562	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
563	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
564	 * values are defined as 0 and 1 respectively.
565	 */
566	struct cpu_itimer it[2];
567
568	/*
569	 * Thread group totals for process CPU timers.
570	 * See thread_group_cputimer(), et al, for details.
571	 */
572	struct thread_group_cputimer cputimer;
573
574	/* Earliest-expiration cache. */
575	struct task_cputime cputime_expires;
576
577	struct list_head cpu_timers[3];
578
579	struct pid *tty_old_pgrp;
580
581	/* boolean value for session group leader */
582	int leader;
583
584	struct tty_struct *tty; /* NULL if no tty */
585
586#ifdef CONFIG_SCHED_AUTOGROUP
587	struct autogroup *autogroup;
588#endif
589	/*
590	 * Cumulative resource counters for dead threads in the group,
591	 * and for reaped dead child processes forked by this group.
592	 * Live threads maintain their own counters and add to these
593	 * in __exit_signal, except for the group leader.
594	 */
595	cputime_t utime, stime, cutime, cstime;
596	cputime_t gtime;
597	cputime_t cgtime;
598#ifndef CONFIG_VIRT_CPU_ACCOUNTING
599	cputime_t prev_utime, prev_stime;
600#endif
601	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
602	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
603	unsigned long inblock, oublock, cinblock, coublock;
604	unsigned long maxrss, cmaxrss;
605	struct task_io_accounting ioac;
606
607	/*
608	 * Cumulative ns of scheduled CPU time of dead threads in the
609	 * group, not including a zombie group leader. (This only differs
610	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
611	 * other than jiffies.)
612	 */
613	unsigned long long sum_sched_runtime;
614
615	/*
616	 * We don't bother to synchronize most readers of this at all,
617	 * because there is no reader checking a limit that actually needs
618	 * to get both rlim_cur and rlim_max atomically, and either one
619	 * alone is a single word that can safely be read normally.
620	 * getrlimit/setrlimit use task_lock(current->group_leader) to
621	 * protect this instead of the siglock, because they really
622	 * have no need to disable irqs.
623	 */
624	struct rlimit rlim[RLIM_NLIMITS];
625
626#ifdef CONFIG_BSD_PROCESS_ACCT
627	struct pacct_struct pacct;	/* per-process accounting information */
628#endif
629#ifdef CONFIG_TASKSTATS
630	struct taskstats *stats;
631#endif
632#ifdef CONFIG_AUDIT
633	unsigned audit_tty;
634	struct tty_audit_buf *tty_audit_buf;
635#endif
636#ifdef CONFIG_CGROUPS
637	/*
638	 * The threadgroup_fork_lock prevents threads from forking with
639	 * CLONE_THREAD while held for writing. Use this for fork-sensitive
640	 * threadgroup-wide operations. It's taken for reading in fork.c in
641	 * copy_process().
642	 * Currently only needed write-side by cgroups.
643	 */
644	struct rw_semaphore threadgroup_fork_lock;
645#endif
646
647	int oom_adj;		/* OOM kill score adjustment (bit shift) */
648	int oom_score_adj;	/* OOM kill score adjustment */
649	int oom_score_adj_min;	/* OOM kill score adjustment minimum value.
650				 * Only settable by CAP_SYS_RESOURCE. */
651
652	struct mutex cred_guard_mutex;	/* guard against foreign influences on
653					 * credential calculations
654					 * (notably. ptrace) */
655};
656
657/* Context switch must be unlocked if interrupts are to be enabled */
658#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
659# define __ARCH_WANT_UNLOCKED_CTXSW
660#endif
661
662/*
663 * Bits in flags field of signal_struct.
664 */
665#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
666#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
667#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
668/*
669 * Pending notifications to parent.
670 */
671#define SIGNAL_CLD_STOPPED	0x00000010
672#define SIGNAL_CLD_CONTINUED	0x00000020
673#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
674
675#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
676
677/* If true, all threads except ->group_exit_task have pending SIGKILL */
678static inline int signal_group_exit(const struct signal_struct *sig)
679{
680	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
681		(sig->group_exit_task != NULL);
682}
683
684/*
685 * Some day this will be a full-fledged user tracking system..
686 */
687struct user_struct {
688	atomic_t __count;	/* reference count */
689	atomic_t processes;	/* How many processes does this user have? */
690	atomic_t files;		/* How many open files does this user have? */
691	atomic_t sigpending;	/* How many pending signals does this user have? */
692#ifdef CONFIG_INOTIFY_USER
693	atomic_t inotify_watches; /* How many inotify watches does this user have? */
694	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
695#endif
696#ifdef CONFIG_FANOTIFY
697	atomic_t fanotify_listeners;
698#endif
699#ifdef CONFIG_EPOLL
700	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
701#endif
702#ifdef CONFIG_POSIX_MQUEUE
703	/* protected by mq_lock	*/
704	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
705#endif
706	unsigned long locked_shm; /* How many pages of mlocked shm ? */
707
708#ifdef CONFIG_KEYS
709	struct key *uid_keyring;	/* UID specific keyring */
710	struct key *session_keyring;	/* UID's default session keyring */
711#endif
712
713	/* Hash table maintenance information */
714	struct hlist_node uidhash_node;
715	uid_t uid;
716	struct user_namespace *user_ns;
717
718#ifdef CONFIG_PERF_EVENTS
719	atomic_long_t locked_vm;
720#endif
721};
722
723extern int uids_sysfs_init(void);
724
725extern struct user_struct *find_user(uid_t);
726
727extern struct user_struct root_user;
728#define INIT_USER (&root_user)
729
730
731struct backing_dev_info;
732struct reclaim_state;
733
734#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
735struct sched_info {
736	/* cumulative counters */
737	unsigned long pcount;	      /* # of times run on this cpu */
738	unsigned long long run_delay; /* time spent waiting on a runqueue */
739
740	/* timestamps */
741	unsigned long long last_arrival,/* when we last ran on a cpu */
742			   last_queued;	/* when we were last queued to run */
743};
744#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
745
746#ifdef CONFIG_TASK_DELAY_ACCT
747struct task_delay_info {
748	spinlock_t	lock;
749	unsigned int	flags;	/* Private per-task flags */
750
751	/* For each stat XXX, add following, aligned appropriately
752	 *
753	 * struct timespec XXX_start, XXX_end;
754	 * u64 XXX_delay;
755	 * u32 XXX_count;
756	 *
757	 * Atomicity of updates to XXX_delay, XXX_count protected by
758	 * single lock above (split into XXX_lock if contention is an issue).
759	 */
760
761	/*
762	 * XXX_count is incremented on every XXX operation, the delay
763	 * associated with the operation is added to XXX_delay.
764	 * XXX_delay contains the accumulated delay time in nanoseconds.
765	 */
766	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
767	u64 blkio_delay;	/* wait for sync block io completion */
768	u64 swapin_delay;	/* wait for swapin block io completion */
769	u32 blkio_count;	/* total count of the number of sync block */
770				/* io operations performed */
771	u32 swapin_count;	/* total count of the number of swapin block */
772				/* io operations performed */
773
774	struct timespec freepages_start, freepages_end;
775	u64 freepages_delay;	/* wait for memory reclaim */
776	u32 freepages_count;	/* total count of memory reclaim */
777};
778#endif	/* CONFIG_TASK_DELAY_ACCT */
779
780static inline int sched_info_on(void)
781{
782#ifdef CONFIG_SCHEDSTATS
783	return 1;
784#elif defined(CONFIG_TASK_DELAY_ACCT)
785	extern int delayacct_on;
786	return delayacct_on;
787#else
788	return 0;
789#endif
790}
791
792enum cpu_idle_type {
793	CPU_IDLE,
794	CPU_NOT_IDLE,
795	CPU_NEWLY_IDLE,
796	CPU_MAX_IDLE_TYPES
797};
798
799/*
800 * Increase resolution of nice-level calculations for 64-bit architectures.
801 * The extra resolution improves shares distribution and load balancing of
802 * low-weight task groups (e.g. nice +19 in an autogroup), deeper taskgroup
803 * hierarchies, especially on larger systems. This is not a user-visible change
804 * and does not change the user-interface for setting shares/weights.
805 *
806 * We increase resolution only if we have enough bits to allow this increased
807 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
808 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
809 * increased costs.
810 */
811#if BITS_PER_LONG > 32
812# define SCHED_LOAD_RESOLUTION	10
813# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
814# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
815#else
816# define SCHED_LOAD_RESOLUTION	0
817# define scale_load(w)		(w)
818# define scale_load_down(w)	(w)
819#endif
820
821#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
822#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
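/*
 * Worked example (editor's sketch, not part of the original header): on
 * a 64-bit kernel SCHED_LOAD_SHIFT is 20, so SCHED_LOAD_SCALE is
 * 1 << 20 = 1048576, and the nice-0 weight of 1024 is stored internally
 * as scale_load(1024) = 1024 << 10 = 1048576.  scale_load_down() undoes
 * this before the value is exposed to userspace, e.g. for cgroup
 * cpu.shares.  On 32-bit kernels both macros are identity operations and
 * SCHED_LOAD_SCALE stays at 1 << 10 = 1024.
 */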
823
824/*
825 * Increase resolution of cpu_power calculations
826 */
827#define SCHED_POWER_SHIFT	10
828#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)
829
830/*
831 * sched-domains (multiprocessor balancing) declarations:
832 */
833#ifdef CONFIG_SMP
834#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
835#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
836#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
837#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
838#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
839#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
840#define SD_PREFER_LOCAL		0x0040  /* Prefer to keep tasks local to this domain */
841#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
842#define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
843#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
844#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
845#define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
846#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
847
848enum powersavings_balance_level {
849	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
850	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
851					 * first for long running threads
852					 */
853	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
854					 * cpu package for power savings
855					 */
856	MAX_POWERSAVINGS_BALANCE_LEVELS
857};
858
859extern int sched_mc_power_savings, sched_smt_power_savings;
860
861static inline int sd_balance_for_mc_power(void)
862{
863	if (sched_smt_power_savings)
864		return SD_POWERSAVINGS_BALANCE;
865
866	if (!sched_mc_power_savings)
867		return SD_PREFER_SIBLING;
868
869	return 0;
870}
871
872static inline int sd_balance_for_package_power(void)
873{
874	if (sched_mc_power_savings | sched_smt_power_savings)
875		return SD_POWERSAVINGS_BALANCE;
876
877	return SD_PREFER_SIBLING;
878}
879
880extern int __weak arch_sd_sibiling_asym_packing(void);
881
882/*
883 * Optimise SD flags for power savings:
884 * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
885 * Keep the default SD flags if sched_{smt,mc}_power_savings == 0.
886 */
887
888static inline int sd_power_saving_flags(void)
889{
890	if (sched_mc_power_savings | sched_smt_power_savings)
891		return SD_BALANCE_NEWIDLE;
892
893	return 0;
894}
895
896struct sched_group {
897	struct sched_group *next;	/* Must be a circular list */
898	atomic_t ref;
899
900	/*
901	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
902	 * single CPU.
903	 */
904	unsigned int cpu_power, cpu_power_orig;
905	unsigned int group_weight;
906
907	/*
908	 * The CPUs this group covers.
909	 *
910	 * NOTE: this field is variable length. (Allocated dynamically
911	 * by attaching extra space to the end of the structure,
912	 * depending on how many CPUs the kernel has booted up with)
913	 */
914	unsigned long cpumask[0];
915};
916
917static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
918{
919	return to_cpumask(sg->cpumask);
920}
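/*
 * Editor's illustrative sketch (not part of the original header):
 * cpumask[0] is a zero-length array, so the mask storage is allocated
 * together with the structure and reached via sched_group_cpus().
 * Assuming linux/slab.h for kzalloc(); alloc_demo_group() is a
 * hypothetical helper:
 *
 *	static struct sched_group *alloc_demo_group(int cpu)
 *	{
 *		struct sched_group *sg;
 *
 *		sg = kzalloc(sizeof(*sg) + cpumask_size(), GFP_KERNEL);
 *		if (sg)
 *			cpumask_set_cpu(cpu, sched_group_cpus(sg));
 *		return sg;
 *	}
 */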
921
922struct sched_domain_attr {
923	int relax_domain_level;
924};
925
926#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
927	.relax_domain_level = -1,			\
928}
929
930extern int sched_domain_level_max;
931
932struct sched_domain {
933	/* These fields must be setup */
934	struct sched_domain *parent;	/* top domain must be null terminated */
935	struct sched_domain *child;	/* bottom domain must be null terminated */
936	struct sched_group *groups;	/* the balancing groups of the domain */
937	unsigned long min_interval;	/* Minimum balance interval ms */
938	unsigned long max_interval;	/* Maximum balance interval ms */
939	unsigned int busy_factor;	/* less balancing by factor if busy */
940	unsigned int imbalance_pct;	/* No balance until over watermark */
941	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
942	unsigned int busy_idx;
943	unsigned int idle_idx;
944	unsigned int newidle_idx;
945	unsigned int wake_idx;
946	unsigned int forkexec_idx;
947	unsigned int smt_gain;
948	int flags;			/* See SD_* */
949	int level;
950
951	/* Runtime fields. */
952	unsigned long last_balance;	/* init to jiffies. units in jiffies */
953	unsigned int balance_interval;	/* initialise to 1. units in ms. */
954	unsigned int nr_balance_failed; /* initialise to 0 */
955
956	u64 last_update;
957
958#ifdef CONFIG_SCHEDSTATS
959	/* load_balance() stats */
960	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
961	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
962	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
963	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
964	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
965	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
966	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
967	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
968
969	/* Active load balancing */
970	unsigned int alb_count;
971	unsigned int alb_failed;
972	unsigned int alb_pushed;
973
974	/* SD_BALANCE_EXEC stats */
975	unsigned int sbe_count;
976	unsigned int sbe_balanced;
977	unsigned int sbe_pushed;
978
979	/* SD_BALANCE_FORK stats */
980	unsigned int sbf_count;
981	unsigned int sbf_balanced;
982	unsigned int sbf_pushed;
983
984	/* try_to_wake_up() stats */
985	unsigned int ttwu_wake_remote;
986	unsigned int ttwu_move_affine;
987	unsigned int ttwu_move_balance;
988#endif
989#ifdef CONFIG_SCHED_DEBUG
990	char *name;
991#endif
992	union {
993		void *private;		/* used during construction */
994		struct rcu_head rcu;	/* used during destruction */
995	};
996
997	unsigned int span_weight;
998	/*
999	 * Span of all CPUs in this domain.
1000	 *
1001	 * NOTE: this field is variable length. (Allocated dynamically
1002	 * by attaching extra space to the end of the structure,
1003	 * depending on how many CPUs the kernel has booted up with)
1004	 */
1005	unsigned long span[0];
1006};
1007
1008static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1009{
1010	return to_cpumask(sd->span);
1011}
1012
1013extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1014				    struct sched_domain_attr *dattr_new);
1015
1016/* Allocate an array of sched domains, for partition_sched_domains(). */
1017cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1018void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1019
1020/* Test a flag in parent sched domain */
1021static inline int test_sd_parent(struct sched_domain *sd, int flag)
1022{
1023	if (sd->parent && (sd->parent->flags & flag))
1024		return 1;
1025
1026	return 0;
1027}
1028
1029unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
1030unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
1031
1032#else /* CONFIG_SMP */
1033
1034struct sched_domain_attr;
1035
1036static inline void
1037partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1038			struct sched_domain_attr *dattr_new)
1039{
1040}
1041#endif	/* !CONFIG_SMP */
1042
1043
1044struct io_context;			/* See blkdev.h */
1045
1046
1047#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1048extern void prefetch_stack(struct task_struct *t);
1049#else
1050static inline void prefetch_stack(struct task_struct *t) { }
1051#endif
1052
1053struct audit_context;		/* See audit.c */
1054struct mempolicy;
1055struct pipe_inode_info;
1056struct uts_namespace;
1057
1058struct rq;
1059struct sched_domain;
1060
1061/*
1062 * wake flags
1063 */
1064#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
1065#define WF_FORK		0x02		/* child wakeup after fork */
1066
1067#define ENQUEUE_WAKEUP		1
1068#define ENQUEUE_HEAD		2
1069#ifdef CONFIG_SMP
1070#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
1071#else
1072#define ENQUEUE_WAKING		0
1073#endif
1074
1075#define DEQUEUE_SLEEP		1
1076
1077struct sched_class {
1078	const struct sched_class *next;
1079
1080	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1081	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1082	void (*yield_task) (struct rq *rq);
1083	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
1084
1085	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1086
1087	struct task_struct * (*pick_next_task) (struct rq *rq);
1088	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1089
1090#ifdef CONFIG_SMP
1091	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
1092
1093	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
1094	void (*post_schedule) (struct rq *this_rq);
1095	void (*task_waking) (struct task_struct *task);
1096	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1097
1098	void (*set_cpus_allowed)(struct task_struct *p,
1099				 const struct cpumask *newmask);
1100
1101	void (*rq_online)(struct rq *rq);
1102	void (*rq_offline)(struct rq *rq);
1103#endif
1104
1105	void (*set_curr_task) (struct rq *rq);
1106	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1107	void (*task_fork) (struct task_struct *p);
1108
1109	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1110	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1111	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1112			     int oldprio);
1113
1114	unsigned int (*get_rr_interval) (struct rq *rq,
1115					 struct task_struct *task);
1116
1117#ifdef CONFIG_FAIR_GROUP_SCHED
1118	void (*task_move_group) (struct task_struct *p, int on_rq);
1119#endif
1120};
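/*
 * Editor's illustrative sketch (not part of the original header):
 * scheduling classes are static ops tables chained by ->next in strict
 * priority order (e.g. rt_sched_class -> fair_sched_class ->
 * idle_sched_class); the core walks the chain, asking each class to
 * pick_next_task() in turn.  A hypothetical class would look roughly
 * like:
 *
 *	static const struct sched_class demo_sched_class = {
 *		.next			= &idle_sched_class,
 *		.enqueue_task		= demo_enqueue_task,
 *		.dequeue_task		= demo_dequeue_task,
 *		.check_preempt_curr	= demo_check_preempt_curr,
 *		.pick_next_task		= demo_pick_next_task,
 *		.put_prev_task		= demo_put_prev_task,
 *		.set_curr_task		= demo_set_curr_task,
 *		.task_tick		= demo_task_tick,
 *	};
 *
 * where the demo_* callbacks are placeholders with the signatures
 * declared above.
 */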
1121
1122struct load_weight {
1123	unsigned long weight, inv_weight;
1124};
1125
1126#ifdef CONFIG_SCHEDSTATS
1127struct sched_statistics {
1128	u64			wait_start;
1129	u64			wait_max;
1130	u64			wait_count;
1131	u64			wait_sum;
1132	u64			iowait_count;
1133	u64			iowait_sum;
1134
1135	u64			sleep_start;
1136	u64			sleep_max;
1137	s64			sum_sleep_runtime;
1138
1139	u64			block_start;
1140	u64			block_max;
1141	u64			exec_max;
1142	u64			slice_max;
1143
1144	u64			nr_migrations_cold;
1145	u64			nr_failed_migrations_affine;
1146	u64			nr_failed_migrations_running;
1147	u64			nr_failed_migrations_hot;
1148	u64			nr_forced_migrations;
1149
1150	u64			nr_wakeups;
1151	u64			nr_wakeups_sync;
1152	u64			nr_wakeups_migrate;
1153	u64			nr_wakeups_local;
1154	u64			nr_wakeups_remote;
1155	u64			nr_wakeups_affine;
1156	u64			nr_wakeups_affine_attempts;
1157	u64			nr_wakeups_passive;
1158	u64			nr_wakeups_idle;
1159};
1160#endif
1161
1162struct sched_entity {
1163	struct load_weight	load;		/* for load-balancing */
1164	struct rb_node		run_node;
1165	struct list_head	group_node;
1166	unsigned int		on_rq;
1167
1168	u64			exec_start;
1169	u64			sum_exec_runtime;
1170	u64			vruntime;
1171	u64			prev_sum_exec_runtime;
1172
1173	u64			nr_migrations;
1174
1175#ifdef CONFIG_SCHEDSTATS
1176	struct sched_statistics statistics;
1177#endif
1178
1179#ifdef CONFIG_FAIR_GROUP_SCHED
1180	struct sched_entity	*parent;
1181	/* rq on which this entity is (to be) queued: */
1182	struct cfs_rq		*cfs_rq;
1183	/* rq "owned" by this entity/group: */
1184	struct cfs_rq		*my_q;
1185#endif
1186};
1187
1188struct sched_rt_entity {
1189	struct list_head run_list;
1190	unsigned long timeout;
1191	unsigned int time_slice;
1192	int nr_cpus_allowed;
1193
1194	struct sched_rt_entity *back;
1195#ifdef CONFIG_RT_GROUP_SCHED
1196	struct sched_rt_entity	*parent;
1197	/* rq on which this entity is (to be) queued: */
1198	struct rt_rq		*rt_rq;
1199	/* rq "owned" by this entity/group: */
1200	struct rt_rq		*my_q;
1201#endif
1202};
1203
1204struct rcu_node;
1205
1206enum perf_event_task_context {
1207	perf_invalid_context = -1,
1208	perf_hw_context = 0,
1209	perf_sw_context,
1210	perf_nr_task_contexts,
1211};
1212
1213struct task_struct {
1214	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1215	void *stack;
1216	atomic_t usage;
1217	unsigned int flags;	/* per process flags, defined below */
1218	unsigned int ptrace;
1219
1220#ifdef CONFIG_SMP
1221	struct task_struct *wake_entry;
1222	int on_cpu;
1223#endif
1224	int on_rq;
1225
1226	int prio, static_prio, normal_prio;
1227	unsigned int rt_priority;
1228	const struct sched_class *sched_class;
1229	struct sched_entity se;
1230	struct sched_rt_entity rt;
1231
1232#ifdef CONFIG_PREEMPT_NOTIFIERS
1233	/* list of struct preempt_notifier: */
1234	struct hlist_head preempt_notifiers;
1235#endif
1236
1237	/*
1238	 * fpu_counter contains the number of consecutive context switches
1239	 * during which the FPU is used. If this exceeds a threshold, the
1240	 * lazy FPU saving becomes eager to avoid the trap. This is an
1241	 * unsigned char so that after 256 times the counter wraps and the
1242	 * behavior turns lazy again; this deals with bursty apps that only
1243	 * use the FPU for a short time.
1244	 */
1245	unsigned char fpu_counter;
1246#ifdef CONFIG_BLK_DEV_IO_TRACE
1247	unsigned int btrace_seq;
1248#endif
1249
1250	unsigned int policy;
1251	cpumask_t cpus_allowed;
1252
1253#ifdef CONFIG_PREEMPT_RCU
1254	int rcu_read_lock_nesting;
1255	char rcu_read_unlock_special;
1256	struct list_head rcu_node_entry;
1257#endif /* #ifdef CONFIG_PREEMPT_RCU */
1258#ifdef CONFIG_TREE_PREEMPT_RCU
1259	struct rcu_node *rcu_blocked_node;
1260#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1261#ifdef CONFIG_RCU_BOOST
1262	struct rt_mutex *rcu_boost_mutex;
1263#endif /* #ifdef CONFIG_RCU_BOOST */
1264
1265#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1266	struct sched_info sched_info;
1267#endif
1268
1269	struct list_head tasks;
1270#ifdef CONFIG_SMP
1271	struct plist_node pushable_tasks;
1272#endif
1273
1274	struct mm_struct *mm, *active_mm;
1275#ifdef CONFIG_COMPAT_BRK
1276	unsigned brk_randomized:1;
1277#endif
1278#if defined(SPLIT_RSS_COUNTING)
1279	struct task_rss_stat	rss_stat;
1280#endif
1281/* task state */
1282	int exit_state;
1283	int exit_code, exit_signal;
1284	int pdeath_signal;  /*  The signal sent when the parent dies  */
1285	unsigned int group_stop;	/* GROUP_STOP_*, siglock protected */
1286	/* ??? */
1287	unsigned int personality;
1288	unsigned did_exec:1;
1289	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1290				 * execve */
1291	unsigned in_iowait:1;
1292
1293
1294	/* Revert to default priority/policy when forking */
1295	unsigned sched_reset_on_fork:1;
1296	unsigned sched_contributes_to_load:1;
1297
1298	pid_t pid;
1299	pid_t tgid;
1300
1301#ifdef CONFIG_CC_STACKPROTECTOR
1302	/* Canary value for the -fstack-protector gcc feature */
1303	unsigned long stack_canary;
1304#endif
1305
1306	/*
1307	 * pointers to (original) parent process, youngest child, younger sibling,
1308	 * older sibling, respectively.  (p->father can be replaced with
1309	 * p->real_parent->pid)
1310	 */
1311	struct task_struct *real_parent; /* real parent process */
1312	struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
1313	/*
1314	 * children/sibling forms the list of my natural children
1315	 */
1316	struct list_head children;	/* list of my children */
1317	struct list_head sibling;	/* linkage in my parent's children list */
1318	struct task_struct *group_leader;	/* threadgroup leader */
1319
1320	/*
1321	 * ptraced is the list of tasks this task is using ptrace on.
1322	 * This includes both natural children and PTRACE_ATTACH targets.
1323	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1324	 */
1325	struct list_head ptraced;
1326	struct list_head ptrace_entry;
1327
1328	/* PID/PID hash table linkage. */
1329	struct pid_link pids[PIDTYPE_MAX];
1330	struct list_head thread_group;
1331
1332	struct completion *vfork_done;		/* for vfork() */
1333	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
1334	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
1335
1336	cputime_t utime, stime, utimescaled, stimescaled;
1337	cputime_t gtime;
1338#ifndef CONFIG_VIRT_CPU_ACCOUNTING
1339	cputime_t prev_utime, prev_stime;
1340#endif
1341	unsigned long nvcsw, nivcsw; /* context switch counts */
1342	struct timespec start_time; 		/* monotonic time */
1343	struct timespec real_start_time;	/* boot based time */
1344/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1345	unsigned long min_flt, maj_flt;
1346
1347	struct task_cputime cputime_expires;
1348	struct list_head cpu_timers[3];
1349
1350/* process credentials */
1351	const struct cred __rcu *real_cred; /* objective and real subjective task
1352					 * credentials (COW) */
1353	const struct cred __rcu *cred;	/* effective (overridable) subjective task
1354					 * credentials (COW) */
1355	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
1356
1357	char comm[TASK_COMM_LEN]; /* executable name excluding path
1358				     - access with [gs]et_task_comm (which lock
1359				       it with task_lock())
1360				     - initialized normally by setup_new_exec */
1361/* file system info */
1362	int link_count, total_link_count;
1363#ifdef CONFIG_SYSVIPC
1364/* ipc stuff */
1365	struct sysv_sem sysvsem;
1366#endif
1367#ifdef CONFIG_DETECT_HUNG_TASK
1368/* hung task detection */
1369	unsigned long last_switch_count;
1370#endif
1371/* CPU-specific state of this task */
1372	struct thread_struct thread;
1373/* filesystem information */
1374	struct fs_struct *fs;
1375/* open file information */
1376	struct files_struct *files;
1377/* namespaces */
1378	struct nsproxy *nsproxy;
1379/* signal handlers */
1380	struct signal_struct *signal;
1381	struct sighand_struct *sighand;
1382
1383	sigset_t blocked, real_blocked;
1384	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
1385	struct sigpending pending;
1386
1387	unsigned long sas_ss_sp;
1388	size_t sas_ss_size;
1389	int (*notifier)(void *priv);
1390	void *notifier_data;
1391	sigset_t *notifier_mask;
1392	struct audit_context *audit_context;
1393#ifdef CONFIG_AUDITSYSCALL
1394	uid_t loginuid;
1395	unsigned int sessionid;
1396#endif
1397	seccomp_t seccomp;
1398
1399/* Thread group tracking */
1400   	u32 parent_exec_id;
1401   	u32 self_exec_id;
1402/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1403 * mempolicy */
1404	spinlock_t alloc_lock;
1405
1406#ifdef CONFIG_GENERIC_HARDIRQS
1407	/* IRQ handler threads */
1408	struct irqaction *irqaction;
1409#endif
1410
1411	/* Protection of the PI data structures: */
1412	raw_spinlock_t pi_lock;
1413
1414#ifdef CONFIG_RT_MUTEXES
1415	/* PI waiters blocked on a rt_mutex held by this task */
1416	struct plist_head pi_waiters;
1417	/* Deadlock detection and priority inheritance handling */
1418	struct rt_mutex_waiter *pi_blocked_on;
1419#endif
1420
1421#ifdef CONFIG_DEBUG_MUTEXES
1422	/* mutex deadlock detection */
1423	struct mutex_waiter *blocked_on;
1424#endif
1425#ifdef CONFIG_TRACE_IRQFLAGS
1426	unsigned int irq_events;
1427	unsigned long hardirq_enable_ip;
1428	unsigned long hardirq_disable_ip;
1429	unsigned int hardirq_enable_event;
1430	unsigned int hardirq_disable_event;
1431	int hardirqs_enabled;
1432	int hardirq_context;
1433	unsigned long softirq_disable_ip;
1434	unsigned long softirq_enable_ip;
1435	unsigned int softirq_disable_event;
1436	unsigned int softirq_enable_event;
1437	int softirqs_enabled;
1438	int softirq_context;
1439#endif
1440#ifdef CONFIG_LOCKDEP
1441# define MAX_LOCK_DEPTH 48UL
1442	u64 curr_chain_key;
1443	int lockdep_depth;
1444	unsigned int lockdep_recursion;
1445	struct held_lock held_locks[MAX_LOCK_DEPTH];
1446	gfp_t lockdep_reclaim_gfp;
1447#endif
1448
1449/* journalling filesystem info */
1450	void *journal_info;
1451
1452/* stacked block device info */
1453	struct bio_list *bio_list;
1454
1455#ifdef CONFIG_BLOCK
1456/* stack plugging */
1457	struct blk_plug *plug;
1458#endif
1459
1460/* VM state */
1461	struct reclaim_state *reclaim_state;
1462
1463	struct backing_dev_info *backing_dev_info;
1464
1465	struct io_context *io_context;
1466
1467	unsigned long ptrace_message;
1468	siginfo_t *last_siginfo; /* For ptrace use.  */
1469	struct task_io_accounting ioac;
1470#if defined(CONFIG_TASK_XACCT)
1471	u64 acct_rss_mem1;	/* accumulated rss usage */
1472	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
1473	cputime_t acct_timexpd;	/* stime + utime since last update */
1474#endif
1475#ifdef CONFIG_CPUSETS
1476	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1477	int mems_allowed_change_disable;
1478	int cpuset_mem_spread_rotor;
1479	int cpuset_slab_spread_rotor;
1480#endif
1481#ifdef CONFIG_CGROUPS
1482	/* Control Group info protected by css_set_lock */
1483	struct css_set __rcu *cgroups;
1484	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1485	struct list_head cg_list;
1486#endif
1487#ifdef CONFIG_FUTEX
1488	struct robust_list_head __user *robust_list;
1489#ifdef CONFIG_COMPAT
1490	struct compat_robust_list_head __user *compat_robust_list;
1491#endif
1492	struct list_head pi_state_list;
1493	struct futex_pi_state *pi_state_cache;
1494#endif
1495#ifdef CONFIG_PERF_EVENTS
1496	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1497	struct mutex perf_event_mutex;
1498	struct list_head perf_event_list;
1499#endif
1500#ifdef CONFIG_NUMA
1501	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1502	short il_next;
1503	short pref_node_fork;
1504#endif
1505	atomic_t fs_excl;	/* holding fs exclusive resources */
1506	struct rcu_head rcu;
1507
1508	/*
1509	 * cache last used pipe for splice
1510	 */
1511	struct pipe_inode_info *splice_pipe;
1512#ifdef	CONFIG_TASK_DELAY_ACCT
1513	struct task_delay_info *delays;
1514#endif
1515#ifdef CONFIG_FAULT_INJECTION
1516	int make_it_fail;
1517#endif
1518	struct prop_local_single dirties;
1519#ifdef CONFIG_LATENCYTOP
1520	int latency_record_count;
1521	struct latency_record latency_record[LT_SAVECOUNT];
1522#endif
1523	/*
1524	 * time slack values; these are used to round up poll() and
1525	 * select() etc. timeout values. These are in nanoseconds.
1526	 */
1527	unsigned long timer_slack_ns;
1528	unsigned long default_timer_slack_ns;
1529
1530	struct list_head	*scm_work_list;
1531#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1532	/* Index of current stored address in ret_stack */
1533	int curr_ret_stack;
1534	/* Stack of return addresses for return function tracing */
1535	struct ftrace_ret_stack	*ret_stack;
1536	/* time stamp for last schedule */
1537	unsigned long long ftrace_timestamp;
1538	/*
1539	 * Number of functions that haven't been traced
1540	 * because of depth overrun.
1541	 */
1542	atomic_t trace_overrun;
1543	/* Pause for the tracing */
1544	atomic_t tracing_graph_pause;
1545#endif
1546#ifdef CONFIG_TRACING
1547	/* state flags for use by tracers */
1548	unsigned long trace;
1549	/* bitmask of trace recursion */
1550	unsigned long trace_recursion;
1551#endif /* CONFIG_TRACING */
1552#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
1553	struct memcg_batch_info {
1554		int do_batch;	/* incremented when batch uncharge started */
1555		struct mem_cgroup *memcg; /* target memcg of uncharge */
1556		unsigned long nr_pages;	/* uncharged usage */
1557		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
1558	} memcg_batch;
1559#endif
1560#ifdef CONFIG_HAVE_HW_BREAKPOINT
1561	atomic_t ptrace_bp_refcnt;
1562#endif
1563};
1564
1565/* Future-safe accessor for struct task_struct's cpus_allowed. */
1566#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1567
1568/*
1569 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
1570 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
1571 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
1572 * values are inverted: lower p->prio value means higher priority.
1573 *
1574 * The MAX_USER_RT_PRIO value allows the actual maximum
1575 * RT priority to be separate from the value exported to
1576 * user-space.  This allows kernel threads to set their
1577 * priority to a value higher than any user task. Note:
1578 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
1579 */
1580
1581#define MAX_USER_RT_PRIO	100
1582#define MAX_RT_PRIO		MAX_USER_RT_PRIO
1583
1584#define MAX_PRIO		(MAX_RT_PRIO + 40)
1585#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
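/*
 * Worked example (editor's sketch, not part of the original header):
 * with MAX_RT_PRIO = 100 and MAX_PRIO = 140, realtime tasks occupy
 * p->prio 0..99 and normal tasks 100..139.  A nice value n maps to
 * prio MAX_RT_PRIO + 20 + n (the NICE_TO_PRIO() helper in the scheduler
 * core), so nice -20 -> 100, nice 0 -> 120 (DEFAULT_PRIO) and
 * nice +19 -> 139.  An RT task with rt_priority p runs at
 * prio MAX_RT_PRIO - 1 - p, so rt_priority 99 yields prio 0, the
 * highest priority in the system.
 */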
1586
1587static inline int rt_prio(int prio)
1588{
1589	if (unlikely(prio < MAX_RT_PRIO))
1590		return 1;
1591	return 0;
1592}
1593
1594static inline int rt_task(struct task_struct *p)
1595{
1596	return rt_prio(p->prio);
1597}
1598
1599static inline struct pid *task_pid(struct task_struct *task)
1600{
1601	return task->pids[PIDTYPE_PID].pid;
1602}
1603
1604static inline struct pid *task_tgid(struct task_struct *task)
1605{
1606	return task->group_leader->pids[PIDTYPE_PID].pid;
1607}
1608
1609/*
1610 * Without tasklist or rcu lock it is not safe to dereference
1611 * the result of task_pgrp/task_session even if task == current;
1612 * we can race with another thread doing sys_setsid/sys_setpgid.
1613 */
1614static inline struct pid *task_pgrp(struct task_struct *task)
1615{
1616	return task->group_leader->pids[PIDTYPE_PGID].pid;
1617}
1618
1619static inline struct pid *task_session(struct task_struct *task)
1620{
1621	return task->group_leader->pids[PIDTYPE_SID].pid;
1622}
1623
1624struct pid_namespace;
1625
1626/*
1627 * the helpers to get the task's different pids as they are seen
1628 * from various namespaces
1629 *
1630 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1631 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1632 *                     current.
1633 * task_xid_nr_ns()  : id seen from the ns specified;
1634 *
1635 * set_task_vxid()   : assigns a virtual id to a task;
1636 *
1637 * see also pid_nr() etc in include/linux/pid.h
1638 */
1639pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1640			struct pid_namespace *ns);
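/*
 * Editor's illustrative sketch (not part of the original header):
 * reporting one task's pid from three viewpoints: the global id, the id
 * as seen from current's pid namespace, and the id as seen from an
 * explicit namespace.  @tsk is assumed to be protected by RCU or
 * tasklist_lock; report_pids() is a hypothetical helper:
 *
 *	static void report_pids(struct task_struct *tsk,
 *				struct pid_namespace *ns)
 *	{
 *		pid_t global_id  = task_pid_nr(tsk);
 *		pid_t virtual_id = task_pid_vnr(tsk);
 *		pid_t ns_id      = task_pid_nr_ns(tsk, ns);
 *
 *		printk("%d %d %d\n", global_id, virtual_id, ns_id);
 *	}
 *
 * For a container's init process, global_id is an ordinary pid while
 * virtual_id is 1 when read from inside that container.
 */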
1641
1642static inline pid_t task_pid_nr(struct task_struct *tsk)
1643{
1644	return tsk->pid;
1645}
1646
1647static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1648					struct pid_namespace *ns)
1649{
1650	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1651}
1652
1653static inline pid_t task_pid_vnr(struct task_struct *tsk)
1654{
1655	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1656}
1657
1658
1659static inline pid_t task_tgid_nr(struct task_struct *tsk)
1660{
1661	return tsk->tgid;
1662}
1663
1664pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1665
1666static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1667{
1668	return pid_vnr(task_tgid(tsk));
1669}
1670
1671
1672static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1673					struct pid_namespace *ns)
1674{
1675	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1676}
1677
1678static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1679{
1680	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1681}
1682
1683
1684static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1685					struct pid_namespace *ns)
1686{
1687	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1688}
1689
1690static inline pid_t task_session_vnr(struct task_struct *tsk)
1691{
1692	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1693}
1694
1695/* obsolete, do not use */
1696static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1697{
1698	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1699}
1700
1701/**
1702 * pid_alive - check that a task structure is not stale
1703 * @p: Task structure to be checked.
1704 *
1705 * Test whether a process is not yet dead (at most zombie state).
1706 * If pid_alive fails, then pointers within the task structure
1707 * can be stale and must not be dereferenced.
1708 */
1709static inline int pid_alive(struct task_struct *p)
1710{
1711	return p->pids[PIDTYPE_PID].pid != NULL;
1712}
1713
1714/**
1715 * is_global_init - check if a task structure is init
1716 * @tsk: Task structure to be checked.
1717 *
1718 * Check if a task structure is the first user space task the kernel created.
1719 */
1720static inline int is_global_init(struct task_struct *tsk)
1721{
1722	return tsk->pid == 1;
1723}
1724
1725/*
1726 * is_container_init:
1727 * check whether the task is the init task of its own pid namespace.
1728 */
1729extern int is_container_init(struct task_struct *tsk);
1730
1731extern struct pid *cad_pid;
1732
1733extern void free_task(struct task_struct *tsk);
1734#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1735
1736extern void __put_task_struct(struct task_struct *t);
1737
1738static inline void put_task_struct(struct task_struct *t)
1739{
1740	if (atomic_dec_and_test(&t->usage))
1741		__put_task_struct(t);
1742}
1743
1744extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1745extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1746
1747/*
1748 * Per process flags
1749 */
1750#define PF_STARTING	0x00000002	/* being created */
1751#define PF_EXITING	0x00000004	/* getting shut down */
1752#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
1753#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
1754#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
1755#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
1756#define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
1757#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
1758#define PF_DUMPCORE	0x00000200	/* dumped core */
1759#define PF_SIGNALED	0x00000400	/* killed by a signal */
1760#define PF_MEMALLOC	0x00000800	/* Allocating memory */
1761#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
1762#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
1763#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
1764#define PF_FROZEN	0x00010000	/* frozen for system suspend */
1765#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
1766#define PF_KSWAPD	0x00040000	/* I am kswapd */
1767#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1768#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1769#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1770#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1771#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
1772#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
1773#define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
1774#define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1775#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
1776#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
1777#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
1778#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
1779
1780/*
1781 * Only the _current_ task can read/write to tsk->flags, but other
1782 * tasks can access tsk->flags in readonly mode for example
1783 * with tsk_used_math (like during threaded core dumping).
1784 * There is however an exception to this rule during ptrace
1785 * or during fork: the ptracer task is allowed to write to the
1786 * child->flags of its traced child (same goes for fork, the parent
1787 * can write to the child->flags), because we're guaranteed the
1788 * child is not running and in turn not changing child->flags
1789 * at the same time the parent does it.
1790 */
1791#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1792#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1793#define clear_used_math() clear_stopped_child_used_math(current)
1794#define set_used_math() set_stopped_child_used_math(current)
1795#define conditional_stopped_child_used_math(condition, child) \
1796	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1797#define conditional_used_math(condition) \
1798	conditional_stopped_child_used_math(condition, current)
1799#define copy_to_stopped_child_used_math(child) \
1800	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1801/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1802#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1803#define used_math() tsk_used_math(current)
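
/*
 * A sketch of the usual pattern under the rule above (only current
 * modifies its own flags): temporarily set a flag such as PF_MEMALLOC
 * around a critical allocation, then restore the previous value.
 *
 *	unsigned int old = current->flags & PF_MEMALLOC;
 *
 *	current->flags |= PF_MEMALLOC;
 *	... allocate without recursing into direct reclaim ...
 *	current->flags = (current->flags & ~PF_MEMALLOC) | old;
 */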
1804
1805/*
1806 * task->group_stop flags
1807 */
1808#define GROUP_STOP_SIGMASK	0xffff    /* signr of the last group stop */
1809#define GROUP_STOP_PENDING	(1 << 16) /* task should stop for group stop */
1810#define GROUP_STOP_CONSUME	(1 << 17) /* consume group stop count */
1811#define GROUP_STOP_TRAPPING	(1 << 18) /* switching from STOPPED to TRACED */
1812#define GROUP_STOP_DEQUEUED	(1 << 19) /* stop signal dequeued */
1813
1814extern void task_clear_group_stop_pending(struct task_struct *task);
1815
1816#ifdef CONFIG_PREEMPT_RCU
1817
1818#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1819#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
1820#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
1821
1822static inline void rcu_copy_process(struct task_struct *p)
1823{
1824	p->rcu_read_lock_nesting = 0;
1825	p->rcu_read_unlock_special = 0;
1826#ifdef CONFIG_TREE_PREEMPT_RCU
1827	p->rcu_blocked_node = NULL;
1828#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1829#ifdef CONFIG_RCU_BOOST
1830	p->rcu_boost_mutex = NULL;
1831#endif /* #ifdef CONFIG_RCU_BOOST */
1832	INIT_LIST_HEAD(&p->rcu_node_entry);
1833}
1834
1835#else
1836
1837static inline void rcu_copy_process(struct task_struct *p)
1838{
1839}
1840
1841#endif
1842
1843#ifdef CONFIG_SMP
1844extern int set_cpus_allowed_ptr(struct task_struct *p,
1845				const struct cpumask *new_mask);
1846#else
1847static inline int set_cpus_allowed_ptr(struct task_struct *p,
1848				       const struct cpumask *new_mask)
1849{
1850	if (!cpumask_test_cpu(0, new_mask))
1851		return -EINVAL;
1852	return 0;
1853}
1854#endif
1855
1856#ifndef CONFIG_CPUMASK_OFFSTACK
1857static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1858{
1859	return set_cpus_allowed_ptr(p, &new_mask);
1860}
1861#endif
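
/*
 * A minimal sketch (assuming the target cpu is online and the caller may
 * sleep): restrict a task, e.g. a freshly created kthread, to a single
 * CPU before waking it up.
 *
 *	int err = set_cpus_allowed_ptr(p, cpumask_of(cpu));
 *	if (err)
 *		... the mask was rejected, e.g. no online CPU in it ...
 */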
1862
1863/*
1864 * Do not use outside of architecture code which knows its limitations.
1865 *
1866 * sched_clock() has no promise of monotonicity or bounded drift between
1867 * CPUs, use (which you should not) requires disabling IRQs.
1868 *
1869 * Please use one of the three interfaces below.
1870 */
1871extern unsigned long long notrace sched_clock(void);
1872/*
1873 * See the comment in kernel/sched_clock.c
1874 */
1875extern u64 cpu_clock(int cpu);
1876extern u64 local_clock(void);
1877extern u64 sched_clock_cpu(int cpu);
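
/*
 * A rough sketch for timing a short, same-context interval (values taken
 * on different CPUs are not guaranteed to be comparable):
 *
 *	u64 start = local_clock();
 *	...
 *	u64 delta_ns = local_clock() - start;
 */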
1878
1879
1880extern void sched_clock_init(void);
1881
1882#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1883static inline void sched_clock_tick(void)
1884{
1885}
1886
1887static inline void sched_clock_idle_sleep_event(void)
1888{
1889}
1890
1891static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
1892{
1893}
1894#else
1895/*
1896 * Architectures can set this to 1 if they have specified
1897 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
1898 * but it turns out during bootup that sched_clock()
1899 * is reliable after all:
1900 */
1901extern int sched_clock_stable;
1902
1903extern void sched_clock_tick(void);
1904extern void sched_clock_idle_sleep_event(void);
1905extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1906#endif
1907
1908#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1909/*
1910 * An interface for runtime opt-in to IRQ time accounting based on sched_clock.
1911 * The opt-in is explicit so that architectures with a slow sched_clock()
1912 * do not pay the performance penalty by default.
1913 */
1914extern void enable_sched_clock_irqtime(void);
1915extern void disable_sched_clock_irqtime(void);
1916#else
1917static inline void enable_sched_clock_irqtime(void) {}
1918static inline void disable_sched_clock_irqtime(void) {}
1919#endif
1920
1921extern unsigned long long
1922task_sched_runtime(struct task_struct *task);
1923extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
1924
1925/* sched_exec is called by processes performing an exec */
1926#ifdef CONFIG_SMP
1927extern void sched_exec(void);
1928#else
1929#define sched_exec()   {}
1930#endif
1931
1932extern void sched_clock_idle_sleep_event(void);
1933extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1934
1935#ifdef CONFIG_HOTPLUG_CPU
1936extern void idle_task_exit(void);
1937#else
1938static inline void idle_task_exit(void) {}
1939#endif
1940
1941#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
1942extern void wake_up_idle_cpu(int cpu);
1943#else
1944static inline void wake_up_idle_cpu(int cpu) { }
1945#endif
1946
1947extern unsigned int sysctl_sched_latency;
1948extern unsigned int sysctl_sched_min_granularity;
1949extern unsigned int sysctl_sched_wakeup_granularity;
1950extern unsigned int sysctl_sched_child_runs_first;
1951
1952enum sched_tunable_scaling {
1953	SCHED_TUNABLESCALING_NONE,
1954	SCHED_TUNABLESCALING_LOG,
1955	SCHED_TUNABLESCALING_LINEAR,
1956	SCHED_TUNABLESCALING_END,
1957};
1958extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
1959
1960#ifdef CONFIG_SCHED_DEBUG
1961extern unsigned int sysctl_sched_migration_cost;
1962extern unsigned int sysctl_sched_nr_migrate;
1963extern unsigned int sysctl_sched_time_avg;
1964extern unsigned int sysctl_timer_migration;
1965extern unsigned int sysctl_sched_shares_window;
1966
1967int sched_proc_update_handler(struct ctl_table *table, int write,
1968		void __user *buffer, size_t *length,
1969		loff_t *ppos);
1970#endif
1971#ifdef CONFIG_SCHED_DEBUG
1972static inline unsigned int get_sysctl_timer_migration(void)
1973{
1974	return sysctl_timer_migration;
1975}
1976#else
1977static inline unsigned int get_sysctl_timer_migration(void)
1978{
1979	return 1;
1980}
1981#endif
1982extern unsigned int sysctl_sched_rt_period;
1983extern int sysctl_sched_rt_runtime;
1984
1985int sched_rt_handler(struct ctl_table *table, int write,
1986		void __user *buffer, size_t *lenp,
1987		loff_t *ppos);
1988
1989#ifdef CONFIG_SCHED_AUTOGROUP
1990extern unsigned int sysctl_sched_autogroup_enabled;
1991
1992extern void sched_autogroup_create_attach(struct task_struct *p);
1993extern void sched_autogroup_detach(struct task_struct *p);
1994extern void sched_autogroup_fork(struct signal_struct *sig);
1995extern void sched_autogroup_exit(struct signal_struct *sig);
1996#ifdef CONFIG_PROC_FS
1997extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
1998extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
1999#endif
2000#else
2001static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2002static inline void sched_autogroup_detach(struct task_struct *p) { }
2003static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2004static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2005#endif
2006
2007#ifdef CONFIG_RT_MUTEXES
2008extern int rt_mutex_getprio(struct task_struct *p);
2009extern void rt_mutex_setprio(struct task_struct *p, int prio);
2010extern void rt_mutex_adjust_pi(struct task_struct *p);
2011#else
2012static inline int rt_mutex_getprio(struct task_struct *p)
2013{
2014	return p->normal_prio;
2015}
2016# define rt_mutex_adjust_pi(p)		do { } while (0)
2017#endif
2018
2019extern bool yield_to(struct task_struct *p, bool preempt);
2020extern void set_user_nice(struct task_struct *p, long nice);
2021extern int task_prio(const struct task_struct *p);
2022extern int task_nice(const struct task_struct *p);
2023extern int can_nice(const struct task_struct *p, const int nice);
2024extern int task_curr(const struct task_struct *p);
2025extern int idle_cpu(int cpu);
2026extern int sched_setscheduler(struct task_struct *, int,
2027			      const struct sched_param *);
2028extern int sched_setscheduler_nocheck(struct task_struct *, int,
2029				      const struct sched_param *);
2030extern struct task_struct *idle_task(int cpu);
2031extern struct task_struct *curr_task(int cpu);
2032extern void set_curr_task(int cpu, struct task_struct *p);
2033
2034void yield(void);
2035
2036/*
2037 * The default (Linux) execution domain.
2038 */
2039extern struct exec_domain	default_exec_domain;
2040
2041union thread_union {
2042	struct thread_info thread_info;
2043	unsigned long stack[THREAD_SIZE/sizeof(long)];
2044};
2045
2046#ifndef __HAVE_ARCH_KSTACK_END
2047static inline int kstack_end(void *addr)
2048{
2049	/* Reliable end of stack detection:
2050	 * Some APM BIOS versions misalign the stack.
2051	 */
2052	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2053}
2054#endif
2055
2056extern union thread_union init_thread_union;
2057extern struct task_struct init_task;
2058
2059extern struct   mm_struct init_mm;
2060
2061extern struct pid_namespace init_pid_ns;
2062
2063/*
2064 * find a task by one of its numerical ids
2065 *
2066 * find_task_by_pid_ns():
2067 *      finds a task by its pid in the specified namespace
2068 * find_task_by_vpid():
2069 *      finds a task by its virtual pid
2070 *
2071 * see also find_vpid() etc in include/linux/pid.h
2072 */
2073
2074extern struct task_struct *find_task_by_vpid(pid_t nr);
2075extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2076		struct pid_namespace *ns);
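
/*
 * A typical lookup sketch (assuming the id is a virtual pid and the
 * caller wants to keep using the task after dropping the RCU read lock):
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(nr);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 *	...
 *	if (p)
 *		put_task_struct(p);
 */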
2077
2078extern void __set_special_pids(struct pid *pid);
2079
2080/* per-UID process charging. */
2081extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
2082static inline struct user_struct *get_uid(struct user_struct *u)
2083{
2084	atomic_inc(&u->__count);
2085	return u;
2086}
2087extern void free_uid(struct user_struct *);
2088extern void release_uids(struct user_namespace *ns);
2089
2090#include <asm/current.h>
2091
2092extern void xtime_update(unsigned long ticks);
2093
2094extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2095extern int wake_up_process(struct task_struct *tsk);
2096extern void wake_up_new_task(struct task_struct *tsk);
2097#ifdef CONFIG_SMP
2098 extern void kick_process(struct task_struct *tsk);
2099#else
2100 static inline void kick_process(struct task_struct *tsk) { }
2101#endif
2102extern void sched_fork(struct task_struct *p);
2103extern void sched_dead(struct task_struct *p);
2104
2105extern void proc_caches_init(void);
2106extern void flush_signals(struct task_struct *);
2107extern void __flush_signals(struct task_struct *);
2108extern void ignore_signals(struct task_struct *);
2109extern void flush_signal_handlers(struct task_struct *, int force_default);
2110extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2111
2112static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2113{
2114	unsigned long flags;
2115	int ret;
2116
2117	spin_lock_irqsave(&tsk->sighand->siglock, flags);
2118	ret = dequeue_signal(tsk, mask, info);
2119	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2120
2121	return ret;
2122}
2123
2124extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2125			      sigset_t *mask);
2126extern void unblock_all_signals(void);
2127extern void release_task(struct task_struct * p);
2128extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2129extern int force_sigsegv(int, struct task_struct *);
2130extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2131extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2132extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2133extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
2134extern int kill_pgrp(struct pid *pid, int sig, int priv);
2135extern int kill_pid(struct pid *pid, int sig, int priv);
2136extern int kill_proc_info(int, struct siginfo *, pid_t);
2137extern int do_notify_parent(struct task_struct *, int);
2138extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2139extern void force_sig(int, struct task_struct *);
2140extern int send_sig(int, struct task_struct *, int);
2141extern int zap_other_threads(struct task_struct *p);
2142extern struct sigqueue *sigqueue_alloc(void);
2143extern void sigqueue_free(struct sigqueue *);
2144extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
2145extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2146extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
2147
2148static inline int kill_cad_pid(int sig, int priv)
2149{
2150	return kill_pid(cad_pid, sig, priv);
2151}
2152
2153/* These can be the second arg to send_sig_info/send_group_sig_info.  */
2154#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2155#define SEND_SIG_PRIV	((struct siginfo *) 1)
2156#define SEND_SIG_FORCED	((struct siginfo *) 2)
2157
2158/*
2159 * True if we are on the alternate signal stack.
2160 */
2161static inline int on_sig_stack(unsigned long sp)
2162{
2163#ifdef CONFIG_STACK_GROWSUP
2164	return sp >= current->sas_ss_sp &&
2165		sp - current->sas_ss_sp < current->sas_ss_size;
2166#else
2167	return sp > current->sas_ss_sp &&
2168		sp - current->sas_ss_sp <= current->sas_ss_size;
2169#endif
2170}
2171
2172static inline int sas_ss_flags(unsigned long sp)
2173{
2174	return (current->sas_ss_size == 0 ? SS_DISABLE
2175		: on_sig_stack(sp) ? SS_ONSTACK : 0);
2176}
2177
2178/*
2179 * Routines for handling mm_structs
2180 */
2181extern struct mm_struct * mm_alloc(void);
2182
2183/* mmdrop drops the mm and the page tables */
2184extern void __mmdrop(struct mm_struct *);
2185static inline void mmdrop(struct mm_struct * mm)
2186{
2187	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2188		__mmdrop(mm);
2189}
2190extern int mm_init_cpumask(struct mm_struct *mm, struct mm_struct *oldmm);
2191
2192/* mmput gets rid of the mappings and all user-space */
2193extern void mmput(struct mm_struct *);
2194/* Grab a reference to a task's mm, if it is not already going away */
2195extern struct mm_struct *get_task_mm(struct task_struct *task);
2196/* Remove the current task's stale references to the old mm_struct */
2197extern void mm_release(struct task_struct *, struct mm_struct *);
2198/* Allocate a new mm structure and copy contents from tsk->mm */
2199extern struct mm_struct *dup_mm(struct task_struct *tsk);
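
/*
 * A common sketch for looking at another task's address space (assuming
 * the caller may sleep): take a reference with get_task_mm() and drop it
 * with mmput() when done; the mm may be NULL for kernel threads.
 *
 *	mm = get_task_mm(task);
 *	if (mm) {
 *		... inspect mm, e.g. under down_read(&mm->mmap_sem) ...
 *		mmput(mm);
 *	}
 */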
2200
2201extern int copy_thread(unsigned long, unsigned long, unsigned long,
2202			struct task_struct *, struct pt_regs *);
2203extern void flush_thread(void);
2204extern void exit_thread(void);
2205
2206extern void exit_files(struct task_struct *);
2207extern void __cleanup_sighand(struct sighand_struct *);
2208
2209extern void exit_itimers(struct signal_struct *);
2210extern void flush_itimer_signals(void);
2211
2212extern NORET_TYPE void do_group_exit(int);
2213
2214extern void daemonize(const char *, ...);
2215extern int allow_signal(int);
2216extern int disallow_signal(int);
2217
2218extern int do_execve(const char *,
2219		     const char __user * const __user *,
2220		     const char __user * const __user *, struct pt_regs *);
2221extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
2222struct task_struct *fork_idle(int);
2223
2224extern void set_task_comm(struct task_struct *tsk, char *from);
2225extern char *get_task_comm(char *to, struct task_struct *tsk);
2226
2227#ifdef CONFIG_SMP
2228void scheduler_ipi(void);
2229extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2230#else
2231static inline void scheduler_ipi(void) { }
2232static inline unsigned long wait_task_inactive(struct task_struct *p,
2233					       long match_state)
2234{
2235	return 1;
2236}
2237#endif
2238
2239#define next_task(p) \
2240	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2241
2242#define for_each_process(p) \
2243	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2244
2245extern bool current_is_single_threaded(void);
2246
2247/*
2248 * Careful: do_each_thread/while_each_thread is a double loop so
2249 *          'break' will not work as expected - use goto instead.
2250 */
2251#define do_each_thread(g, t) \
2252	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2253
2254#define while_each_thread(g, t) \
2255	while ((t = next_thread(t)) != g)
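
/*
 * A usage sketch (assuming tasklist_lock or rcu_read_lock() is held, and
 * with some_condition() as a placeholder for whatever test the caller
 * needs); note the goto, since 'break' does not leave the double loop:
 *
 *	do_each_thread(g, t) {
 *		if (some_condition(t))
 *			goto found;
 *	} while_each_thread(g, t);
 *	...
 * found:
 *	...
 */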
2256
2257static inline int get_nr_threads(struct task_struct *tsk)
2258{
2259	return tsk->signal->nr_threads;
2260}
2261
2262/* de_thread depends on thread_group_leader not being a pid based check */
2263#define thread_group_leader(p)	(p == p->group_leader)
2264
2265/* Due to the insanities of de_thread it is possible for a process
2266 * to have the pid of the thread group leader without actually being
2267 * the thread group leader.  For iteration through the pids in proc
2268 * all we care about is that we have a task with the appropriate
2269 * pid; we don't actually care whether we have the right task.
2270 */
2271static inline int has_group_leader_pid(struct task_struct *p)
2272{
2273	return p->pid == p->tgid;
2274}
2275
2276static inline
2277int same_thread_group(struct task_struct *p1, struct task_struct *p2)
2278{
2279	return p1->tgid == p2->tgid;
2280}
2281
2282static inline struct task_struct *next_thread(const struct task_struct *p)
2283{
2284	return list_entry_rcu(p->thread_group.next,
2285			      struct task_struct, thread_group);
2286}
2287
2288static inline int thread_group_empty(struct task_struct *p)
2289{
2290	return list_empty(&p->thread_group);
2291}
2292
2293#define delay_group_leader(p) \
2294		(thread_group_leader(p) && !thread_group_empty(p))
2295
2296static inline int task_detached(struct task_struct *p)
2297{
2298	return p->exit_signal == -1;
2299}
2300
2301/*
2302 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2303 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2304 * pins the final release of task.io_context.  Also protects ->cpuset and
2305 * ->cgroup.subsys[].
2306 *
2307 * Nests both inside and outside of read_lock(&tasklist_lock).
2308 * It must not be nested with write_lock_irq(&tasklist_lock),
2309 * neither inside nor outside.
2310 */
2311static inline void task_lock(struct task_struct *p)
2312{
2313	spin_lock(&p->alloc_lock);
2314}
2315
2316static inline void task_unlock(struct task_struct *p)
2317{
2318	spin_unlock(&p->alloc_lock);
2319}
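
/*
 * A sketch of the usual pattern (see the locking rules above): pin the
 * fields protected by alloc_lock, such as ->comm or ->mm, while reading
 * them from another task.
 *
 *	task_lock(p);
 *	... p->comm and p->mm are stable here ...
 *	task_unlock(p);
 */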
2320
2321extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2322							unsigned long *flags);
2323
2324#define lock_task_sighand(tsk, flags)					\
2325({	struct sighand_struct *__ss;					\
2326	__cond_lock(&(tsk)->sighand->siglock,				\
2327		    (__ss = __lock_task_sighand(tsk, flags)));		\
2328	__ss;								\
2329})
2330
2331static inline void unlock_task_sighand(struct task_struct *tsk,
2332						unsigned long *flags)
2333{
2334	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2335}
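
/*
 * A usage sketch: lock_task_sighand() fails when the task is already
 * being released, so the result must be checked before touching
 * ->sighand.
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(task, &flags)) {
 *		... ->sighand is valid, siglock held, irqs disabled ...
 *		unlock_task_sighand(task, &flags);
 *	}
 */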
2336
2337/* See the declaration of threadgroup_fork_lock in signal_struct. */
2338#ifdef CONFIG_CGROUPS
2339static inline void threadgroup_fork_read_lock(struct task_struct *tsk)
2340{
2341	down_read(&tsk->signal->threadgroup_fork_lock);
2342}
2343static inline void threadgroup_fork_read_unlock(struct task_struct *tsk)
2344{
2345	up_read(&tsk->signal->threadgroup_fork_lock);
2346}
2347static inline void threadgroup_fork_write_lock(struct task_struct *tsk)
2348{
2349	down_write(&tsk->signal->threadgroup_fork_lock);
2350}
2351static inline void threadgroup_fork_write_unlock(struct task_struct *tsk)
2352{
2353	up_write(&tsk->signal->threadgroup_fork_lock);
2354}
2355#else
2356static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {}
2357static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {}
2358static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {}
2359static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {}
2360#endif
2361
2362#ifndef __HAVE_THREAD_FUNCTIONS
2363
2364#define task_thread_info(task)	((struct thread_info *)(task)->stack)
2365#define task_stack_page(task)	((task)->stack)
2366
2367static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2368{
2369	*task_thread_info(p) = *task_thread_info(org);
2370	task_thread_info(p)->task = p;
2371}
2372
2373static inline unsigned long *end_of_stack(struct task_struct *p)
2374{
2375	return (unsigned long *)(task_thread_info(p) + 1);
2376}
2377
2378#endif
2379
2380static inline int object_is_on_stack(void *obj)
2381{
2382	void *stack = task_stack_page(current);
2383
2384	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2385}
2386
2387extern void thread_info_cache_init(void);
2388
2389#ifdef CONFIG_DEBUG_STACK_USAGE
2390static inline unsigned long stack_not_used(struct task_struct *p)
2391{
2392	unsigned long *n = end_of_stack(p);
2393
2394	do { 	/* Skip over canary */
2395		n++;
2396	} while (!*n);
2397
2398	return (unsigned long)n - (unsigned long)end_of_stack(p);
2399}
2400#endif
2401
2402/* set thread flags in other task's structures
2403 * - see asm/thread_info.h for TIF_xxxx flags available
2404 */
2405static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2406{
2407	set_ti_thread_flag(task_thread_info(tsk), flag);
2408}
2409
2410static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2411{
2412	clear_ti_thread_flag(task_thread_info(tsk), flag);
2413}
2414
2415static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2416{
2417	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2418}
2419
2420static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2421{
2422	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2423}
2424
2425static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2426{
2427	return test_ti_thread_flag(task_thread_info(tsk), flag);
2428}
2429
2430static inline void set_tsk_need_resched(struct task_struct *tsk)
2431{
2432	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2433}
2434
2435static inline void clear_tsk_need_resched(struct task_struct *tsk)
2436{
2437	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2438}
2439
2440static inline int test_tsk_need_resched(struct task_struct *tsk)
2441{
2442	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2443}
2444
2445static inline int restart_syscall(void)
2446{
2447	set_tsk_thread_flag(current, TIF_SIGPENDING);
2448	return -ERESTARTNOINTR;
2449}
2450
2451static inline int signal_pending(struct task_struct *p)
2452{
2453	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2454}
2455
2456static inline int __fatal_signal_pending(struct task_struct *p)
2457{
2458	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2459}
2460
2461static inline int fatal_signal_pending(struct task_struct *p)
2462{
2463	return signal_pending(p) && __fatal_signal_pending(p);
2464}
2465
2466static inline int signal_pending_state(long state, struct task_struct *p)
2467{
2468	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2469		return 0;
2470	if (!signal_pending(p))
2471		return 0;
2472
2473	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2474}
2475
2476static inline int need_resched(void)
2477{
2478	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
2479}
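
/*
 * A sketch of a typical polling use (assuming a context that is allowed
 * to spin briefly, such as an idle or poll loop):
 *
 *	while (!need_resched())
 *		cpu_relax();
 */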
2480
2481/*
2482 * cond_resched() and cond_resched_lock(): latency reduction via
2483 * explicit rescheduling in places that are safe. The return
2484 * value indicates whether a reschedule was in fact done.
2485 * cond_resched_lock() will drop the spinlock before scheduling;
2486 * cond_resched_softirq() will enable bottom halves (BHs) before scheduling.
2487 */
2488extern int _cond_resched(void);
2489
2490#define cond_resched() ({			\
2491	__might_sleep(__FILE__, __LINE__, 0);	\
2492	_cond_resched();			\
2493})
2494
2495extern int __cond_resched_lock(spinlock_t *lock);
2496
2497#ifdef CONFIG_PREEMPT
2498#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
2499#else
2500#define PREEMPT_LOCK_OFFSET	0
2501#endif
2502
2503#define cond_resched_lock(lock) ({				\
2504	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
2505	__cond_resched_lock(lock);				\
2506})
2507
2508extern int __cond_resched_softirq(void);
2509
2510#define cond_resched_softirq() ({					\
2511	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2512	__cond_resched_softirq();					\
2513})
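
/*
 * A sketch of typical use in a long, sleepable loop (process_one() is a
 * placeholder for the per-iteration work):
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_one(i);
 *		cond_resched();
 *	}
 *
 * Inside a spinlocked region, cond_resched_lock(&lock) can be used
 * instead; it drops and re-takes the lock around the reschedule.
 */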
2514
2515/*
2516 * Does a critical section need to be broken because another
2517 * task is waiting?  (Technically this does not depend on CONFIG_PREEMPT,
2518 * but reflects a general need for low latency.)
2519 */
2520static inline int spin_needbreak(spinlock_t *lock)
2521{
2522#ifdef CONFIG_PREEMPT
2523	return spin_is_contended(lock);
2524#else
2525	return 0;
2526#endif
2527}
2528
2529/*
2530 * Thread group CPU time accounting.
2531 */
2532void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2533void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
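
/*
 * A sketch of summing group CPU time (assuming tsk is properly referenced):
 *
 *	struct task_cputime totals;
 *
 *	thread_group_cputime(tsk, &totals);
 *	... totals.utime, totals.stime and totals.sum_exec_runtime now hold
 *	    the accumulated figures for the whole thread group ...
 */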
2534
2535static inline void thread_group_cputime_init(struct signal_struct *sig)
2536{
2537	spin_lock_init(&sig->cputimer.lock);
2538}
2539
2540/*
2541 * Reevaluate whether the task has signals pending delivery.
2542 * Wake the task if so.
2543 * This is required every time the blocked sigset_t changes.
2544 * Callers must hold sighand->siglock.
2545 */
2546extern void recalc_sigpending_and_wake(struct task_struct *t);
2547extern void recalc_sigpending(void);
2548
2549extern void signal_wake_up(struct task_struct *t, int resume_stopped);
2550
2551/*
2552 * Wrappers for p->thread_info->cpu access. No-op on UP.
2553 */
2554#ifdef CONFIG_SMP
2555
2556static inline unsigned int task_cpu(const struct task_struct *p)
2557{
2558	return task_thread_info(p)->cpu;
2559}
2560
2561extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2562
2563#else
2564
2565static inline unsigned int task_cpu(const struct task_struct *p)
2566{
2567	return 0;
2568}
2569
2570static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2571{
2572}
2573
2574#endif /* CONFIG_SMP */
2575
2576extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2577extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2578
2579extern void normalize_rt_tasks(void);
2580
2581#ifdef CONFIG_CGROUP_SCHED
2582
2583extern struct task_group root_task_group;
2584
2585extern struct task_group *sched_create_group(struct task_group *parent);
2586extern void sched_destroy_group(struct task_group *tg);
2587extern void sched_move_task(struct task_struct *tsk);
2588#ifdef CONFIG_FAIR_GROUP_SCHED
2589extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
2590extern unsigned long sched_group_shares(struct task_group *tg);
2591#endif
2592#ifdef CONFIG_RT_GROUP_SCHED
2593extern int sched_group_set_rt_runtime(struct task_group *tg,
2594				      long rt_runtime_us);
2595extern long sched_group_rt_runtime(struct task_group *tg);
2596extern int sched_group_set_rt_period(struct task_group *tg,
2597				      long rt_period_us);
2598extern long sched_group_rt_period(struct task_group *tg);
2599extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
2600#endif
2601#endif
2602
2603extern int task_can_switch_user(struct user_struct *up,
2604					struct task_struct *tsk);
2605
2606#ifdef CONFIG_TASK_XACCT
2607static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2608{
2609	tsk->ioac.rchar += amt;
2610}
2611
2612static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2613{
2614	tsk->ioac.wchar += amt;
2615}
2616
2617static inline void inc_syscr(struct task_struct *tsk)
2618{
2619	tsk->ioac.syscr++;
2620}
2621
2622static inline void inc_syscw(struct task_struct *tsk)
2623{
2624	tsk->ioac.syscw++;
2625}
2626#else
2627static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2628{
2629}
2630
2631static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2632{
2633}
2634
2635static inline void inc_syscr(struct task_struct *tsk)
2636{
2637}
2638
2639static inline void inc_syscw(struct task_struct *tsk)
2640{
2641}
2642#endif
2643
2644#ifndef TASK_SIZE_OF
2645#define TASK_SIZE_OF(tsk)	TASK_SIZE
2646#endif
2647
2648#ifdef CONFIG_MM_OWNER
2649extern void mm_update_next_owner(struct mm_struct *mm);
2650extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2651#else
2652static inline void mm_update_next_owner(struct mm_struct *mm)
2653{
2654}
2655
2656static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2657{
2658}
2659#endif /* CONFIG_MM_OWNER */
2660
2661static inline unsigned long task_rlimit(const struct task_struct *tsk,
2662		unsigned int limit)
2663{
2664	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
2665}
2666
2667static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
2668		unsigned int limit)
2669{
2670	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
2671}
2672
2673static inline unsigned long rlimit(unsigned int limit)
2674{
2675	return task_rlimit(current, limit);
2676}
2677
2678static inline unsigned long rlimit_max(unsigned int limit)
2679{
2680	return task_rlimit_max(current, limit);
2681}
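
/*
 * A sketch of a typical check against the caller's soft limit
 * (RLIMIT_FSIZE is used purely as an example):
 *
 *	if (new_size > rlimit(RLIMIT_FSIZE))
 *		return -EFBIG;
 */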
2682
2683#endif /* __KERNEL__ */
2684
2685#endif
2686