sched.h revision f0af911a9dec9de702645182c8d269449e24d24b
1#ifndef _LINUX_SCHED_H
2#define _LINUX_SCHED_H
3
4/*
5 * cloning flags:
6 */
7#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
8#define CLONE_VM	0x00000100	/* set if VM shared between processes */
9#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
10#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
11#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
12#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
13#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
14#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
15#define CLONE_THREAD	0x00010000	/* Same thread group? */
16#define CLONE_NEWNS	0x00020000	/* New namespace group? */
17#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
18#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
19#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
20#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
21#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
22#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
23#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
24/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
25   and is now available for re-use. */
26#define CLONE_NEWUTS		0x04000000	/* New utsname group? */
27#define CLONE_NEWIPC		0x08000000	/* New ipcs */
28#define CLONE_NEWUSER		0x10000000	/* New user namespace */
29#define CLONE_NEWPID		0x20000000	/* New pid namespace */
30#define CLONE_NEWNET		0x40000000	/* New network namespace */
31#define CLONE_IO		0x80000000	/* Clone io context */
32
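/*
 * Illustrative sketch (not part of this header): how the flags above combine
 * in practice. A plain fork() corresponds to clone() with no sharing flags set,
 * while a pthread-style thread shares almost everything with its creator; the
 * mask below is roughly the set a pthread library passes to clone():
 *
 *	unsigned long thread_flags = CLONE_VM | CLONE_FS | CLONE_FILES |
 *				     CLONE_SIGHAND | CLONE_THREAD |
 *				     CLONE_SYSVSEM | CLONE_SETTLS |
 *				     CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
 */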
33/*
34 * Scheduling policies
35 */
36#define SCHED_NORMAL		0
37#define SCHED_FIFO		1
38#define SCHED_RR		2
39#define SCHED_BATCH		3
40/* SCHED_ISO: reserved but not implemented yet */
41#define SCHED_IDLE		5
42/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
43#define SCHED_RESET_ON_FORK     0x40000000
44
45#ifdef __KERNEL__
46
47struct sched_param {
48	int sched_priority;
49};
50
51#include <asm/param.h>	/* for HZ */
52
53#include <linux/capability.h>
54#include <linux/threads.h>
55#include <linux/kernel.h>
56#include <linux/types.h>
57#include <linux/timex.h>
58#include <linux/jiffies.h>
59#include <linux/rbtree.h>
60#include <linux/thread_info.h>
61#include <linux/cpumask.h>
62#include <linux/errno.h>
63#include <linux/nodemask.h>
64#include <linux/mm_types.h>
65
66#include <asm/system.h>
67#include <asm/page.h>
68#include <asm/ptrace.h>
69#include <asm/cputime.h>
70
71#include <linux/smp.h>
72#include <linux/sem.h>
73#include <linux/signal.h>
74#include <linux/compiler.h>
75#include <linux/completion.h>
76#include <linux/pid.h>
77#include <linux/percpu.h>
78#include <linux/topology.h>
79#include <linux/proportions.h>
80#include <linux/seccomp.h>
81#include <linux/rcupdate.h>
82#include <linux/rculist.h>
83#include <linux/rtmutex.h>
84
85#include <linux/time.h>
86#include <linux/param.h>
87#include <linux/resource.h>
88#include <linux/timer.h>
89#include <linux/hrtimer.h>
90#include <linux/task_io_accounting.h>
91#include <linux/latencytop.h>
92#include <linux/cred.h>
93
94#include <asm/processor.h>
95
96struct exec_domain;
97struct futex_pi_state;
98struct robust_list_head;
99struct bio_list;
100struct fs_struct;
101struct perf_event_context;
102
103/*
104 * List of flags we want to share for kernel threads,
105 * if only because they are not used by them anyway.
106 */
107#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
108
109/*
110 * These are the constants used for fixed-point load-average
111 * counting. Some notes:
112 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
113 *    a load-average precision of 10 bits integer + 11 bits fractional
114 *  - if you want to count load-averages more often, you need more
115 *    precision, or rounding will get you. With 2-second counting freq,
116 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
117 *    11 bit fractions.
118 */
119extern unsigned long avenrun[];		/* Load averages */
120extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
121
122#define FSHIFT		11		/* nr of bits of precision */
123#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
124#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
125#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
126#define EXP_5		2014		/* 1/exp(5sec/5min) */
127#define EXP_15		2037		/* 1/exp(5sec/15min) */
128
129#define CALC_LOAD(load,exp,n) \
130	load *= exp; \
131	load += n*(FIXED_1-exp); \
132	load >>= FSHIFT;
133
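/*
 * Illustrative sketch (not part of this header): turning avenrun[] back into
 * the familiar "0.42"-style numbers, much as the /proc/loadavg code does. The
 * helper macros below are local to this example:
 *
 *	#define EXAMPLE_LOAD_INT(x)  ((x) >> FSHIFT)
 *	#define EXAMPLE_LOAD_FRAC(x) EXAMPLE_LOAD_INT(((x) & (FIXED_1 - 1)) * 100)
 *
 *	unsigned long loads[3];
 *
 *	get_avenrun(loads, FIXED_1 / 200, 0);
 *	printk("load: %lu.%02lu\n",
 *	       EXAMPLE_LOAD_INT(loads[0]), EXAMPLE_LOAD_FRAC(loads[0]));
 */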
134extern unsigned long total_forks;
135extern int nr_threads;
136DECLARE_PER_CPU(unsigned long, process_counts);
137extern int nr_processes(void);
138extern unsigned long nr_running(void);
139extern unsigned long nr_uninterruptible(void);
140extern unsigned long nr_iowait(void);
141extern unsigned long nr_iowait_cpu(int cpu);
142extern unsigned long this_cpu_load(void);
143
144
145extern void calc_global_load(unsigned long ticks);
146
147extern unsigned long get_parent_ip(unsigned long addr);
148
149struct seq_file;
150struct cfs_rq;
151struct task_group;
152#ifdef CONFIG_SCHED_DEBUG
153extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
154extern void proc_sched_set_task(struct task_struct *p);
155extern void
156print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
157#else
158static inline void
159proc_sched_show_task(struct task_struct *p, struct seq_file *m)
160{
161}
162static inline void proc_sched_set_task(struct task_struct *p)
163{
164}
165static inline void
166print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
167{
168}
169#endif
170
171/*
172 * Task state bitmask. NOTE! These bits are also
173 * encoded in fs/proc/array.c: get_task_state().
174 *
175 * We have two separate sets of flags: task->state
176 * is about runnability, while task->exit_state is
177 * about the task exiting. Confusing, but this way
178 * modifying one set can't modify the other one by
179 * mistake.
180 */
181#define TASK_RUNNING		0
182#define TASK_INTERRUPTIBLE	1
183#define TASK_UNINTERRUPTIBLE	2
184#define __TASK_STOPPED		4
185#define __TASK_TRACED		8
186/* in tsk->exit_state */
187#define EXIT_ZOMBIE		16
188#define EXIT_DEAD		32
189/* in tsk->state again */
190#define TASK_DEAD		64
191#define TASK_WAKEKILL		128
192#define TASK_WAKING		256
193#define TASK_STATE_MAX		512
194
195#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
196
197extern char ___assert_task_state[1 - 2*!!(
198		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
199
200/* Convenience macros for the sake of set_task_state */
201#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
202#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
203#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
204
205/* Convenience macros for the sake of wake_up */
206#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
207#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
208
209/* get_task_state() */
210#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
211				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
212				 __TASK_TRACED)
213
214#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
215#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
216#define task_is_dead(task)	((task)->exit_state != 0)
217#define task_is_stopped_or_traced(task)	\
218			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
219#define task_contributes_to_load(task)	\
220				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
221				 (task->flags & PF_FREEZING) == 0)
222
223#define __set_task_state(tsk, state_value)		\
224	do { (tsk)->state = (state_value); } while (0)
225#define set_task_state(tsk, state_value)		\
226	set_mb((tsk)->state, (state_value))
227
228/*
229 * set_current_state() includes a barrier so that the write of current->state
230 * is correctly serialised wrt the caller's subsequent test of whether to
231 * actually sleep:
232 *
233 *	set_current_state(TASK_UNINTERRUPTIBLE);
234 *	if (do_i_need_to_sleep())
235 *		schedule();
236 *
237 * If the caller does not need such serialisation then use __set_current_state()
238 */
239#define __set_current_state(state_value)			\
240	do { current->state = (state_value); } while (0)
241#define set_current_state(state_value)		\
242	set_mb(current->state, (state_value))
243
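/*
 * Illustrative sketch (not part of this header): the canonical wait loop built
 * on set_current_state(); "condition" stands for whatever the caller is
 * waiting on. The barrier keeps the state write ordered before the condition
 * test, so a concurrent waker that sets the condition and calls wake_up()
 * cannot be missed:
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */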
244/* Task command name length */
245#define TASK_COMM_LEN 16
246
247#include <linux/spinlock.h>
248
249/*
250 * This serializes "schedule()" and also protects
251 * the run-queue from deletions/modifications (but
252 * _adding_ to the beginning of the run-queue has
253 * a separate lock).
254 */
255extern rwlock_t tasklist_lock;
256extern spinlock_t mmlist_lock;
257
258struct task_struct;
259
260#ifdef CONFIG_PROVE_RCU
261extern int lockdep_tasklist_lock_is_held(void);
262#endif /* #ifdef CONFIG_PROVE_RCU */
263
264extern void sched_init(void);
265extern void sched_init_smp(void);
266extern asmlinkage void schedule_tail(struct task_struct *prev);
267extern void init_idle(struct task_struct *idle, int cpu);
268extern void init_idle_bootup_task(struct task_struct *idle);
269
270extern int runqueue_is_locked(int cpu);
271
272extern cpumask_var_t nohz_cpu_mask;
273#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
274extern void select_nohz_load_balancer(int stop_tick);
275extern int get_nohz_timer_target(void);
276#else
277static inline void select_nohz_load_balancer(int stop_tick) { }
278#endif
279
280/*
281 * Only dump TASK_* tasks. (0 for all tasks)
282 */
283extern void show_state_filter(unsigned long state_filter);
284
285static inline void show_state(void)
286{
287	show_state_filter(0);
288}
289
290extern void show_regs(struct pt_regs *);
291
292/*
293 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
294 * task), SP is the stack pointer of the first frame that should be shown in the back
295 * trace (or NULL if the entire call-chain of the task should be shown).
296 */
297extern void show_stack(struct task_struct *task, unsigned long *sp);
298
299void io_schedule(void);
300long io_schedule_timeout(long timeout);
301
302extern void cpu_init (void);
303extern void trap_init(void);
304extern void update_process_times(int user);
305extern void scheduler_tick(void);
306
307extern void sched_show_task(struct task_struct *p);
308
309#ifdef CONFIG_LOCKUP_DETECTOR
310extern void touch_softlockup_watchdog(void);
311extern void touch_softlockup_watchdog_sync(void);
312extern void touch_all_softlockup_watchdogs(void);
313extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
314				  void __user *buffer,
315				  size_t *lenp, loff_t *ppos);
316extern unsigned int  softlockup_panic;
317extern int softlockup_thresh;
318void lockup_detector_init(void);
319#else
320static inline void touch_softlockup_watchdog(void)
321{
322}
323static inline void touch_softlockup_watchdog_sync(void)
324{
325}
326static inline void touch_all_softlockup_watchdogs(void)
327{
328}
329static inline void lockup_detector_init(void)
330{
331}
332#endif
333
334#ifdef CONFIG_DETECT_HUNG_TASK
335extern unsigned int  sysctl_hung_task_panic;
336extern unsigned long sysctl_hung_task_check_count;
337extern unsigned long sysctl_hung_task_timeout_secs;
338extern unsigned long sysctl_hung_task_warnings;
339extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
340					 void __user *buffer,
341					 size_t *lenp, loff_t *ppos);
342#else
343/* Avoid need for ifdefs elsewhere in the code */
344enum { sysctl_hung_task_timeout_secs = 0 };
345#endif
346
347/* Attach to any functions which should be ignored in wchan output. */
348#define __sched		__attribute__((__section__(".sched.text")))
349
350/* Linker adds these: start and end of __sched functions */
351extern char __sched_text_start[], __sched_text_end[];
352
353/* Is this address in the __sched functions? */
354extern int in_sched_functions(unsigned long addr);
355
356#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
357extern signed long schedule_timeout(signed long timeout);
358extern signed long schedule_timeout_interruptible(signed long timeout);
359extern signed long schedule_timeout_killable(signed long timeout);
360extern signed long schedule_timeout_uninterruptible(signed long timeout);
361asmlinkage void schedule(void);
362extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
363
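/*
 * Illustrative sketch (not part of this header): sleeping for roughly 100ms.
 * schedule_timeout() itself requires the caller to set the task state first;
 * the _interruptible/_killable/_uninterruptible wrappers do that internally:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	schedule_timeout(msecs_to_jiffies(100));
 *
 *	schedule_timeout_interruptible(msecs_to_jiffies(100));
 */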
364struct nsproxy;
365struct user_namespace;
366
367/*
368 * Default maximum number of active map areas; this limits the number of vmas
369 * per mm struct. Users can override this number via sysctl, but there is a
370 * problem.
371 *
372 * When a program's coredump is generated in ELF format, a section is created
373 * per vma. In ELF, the number of sections is represented as an unsigned short,
374 * so the number of sections must not exceed 65535 in a coredump.
375 * Because the kernel adds some informative sections to the image of the program
376 * when generating a coredump, we need some margin. The number of extra sections
377 * is 1-3 now and depends on arch. We use "5" as a safe margin here.
378 */
379#define MAPCOUNT_ELF_CORE_MARGIN	(5)
380#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
381
382extern int sysctl_max_map_count;
383
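/*
 * Illustrative note: with USHRT_MAX = 65535 and the margin of 5 above, the
 * resulting default is 65530, which is the value normally reported by
 * "sysctl vm.max_map_count" unless an administrator has raised it.
 */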
384#include <linux/aio.h>
385
386#ifdef CONFIG_MMU
387extern void arch_pick_mmap_layout(struct mm_struct *mm);
388extern unsigned long
389arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
390		       unsigned long, unsigned long);
391extern unsigned long
392arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
393			  unsigned long len, unsigned long pgoff,
394			  unsigned long flags);
395extern void arch_unmap_area(struct mm_struct *, unsigned long);
396extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
397#else
398static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
399#endif
400
401
402extern void set_dumpable(struct mm_struct *mm, int value);
403extern int get_dumpable(struct mm_struct *mm);
404
405/* mm flags */
406/* dumpable bits */
407#define MMF_DUMPABLE      0  /* core dump is permitted */
408#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
409
410#define MMF_DUMPABLE_BITS 2
411#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
412
413/* coredump filter bits */
414#define MMF_DUMP_ANON_PRIVATE	2
415#define MMF_DUMP_ANON_SHARED	3
416#define MMF_DUMP_MAPPED_PRIVATE	4
417#define MMF_DUMP_MAPPED_SHARED	5
418#define MMF_DUMP_ELF_HEADERS	6
419#define MMF_DUMP_HUGETLB_PRIVATE 7
420#define MMF_DUMP_HUGETLB_SHARED  8
421
422#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
423#define MMF_DUMP_FILTER_BITS	7
424#define MMF_DUMP_FILTER_MASK \
425	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
426#define MMF_DUMP_FILTER_DEFAULT \
427	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
428	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
429
430#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
431# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
432#else
433# define MMF_DUMP_MASK_DEFAULT_ELF	0
434#endif
435					/* leave room for more dump flags */
436#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
437#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
438
439#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
440
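/*
 * Illustrative note: the MMF_DUMP_* filter bits above are what
 * /proc/<pid>/coredump_filter exposes, shifted down by MMF_DUMP_FILTER_SHIFT.
 * With the default filter that means bits 0 (anon private), 1 (anon shared)
 * and 5 (hugetlb private) are set, i.e. a value of 0x23, or 0x33 when
 * CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS also contributes bit 4 (ELF headers).
 */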
441struct sighand_struct {
442	atomic_t		count;
443	struct k_sigaction	action[_NSIG];
444	spinlock_t		siglock;
445	wait_queue_head_t	signalfd_wqh;
446};
447
448struct pacct_struct {
449	int			ac_flag;
450	long			ac_exitcode;
451	unsigned long		ac_mem;
452	cputime_t		ac_utime, ac_stime;
453	unsigned long		ac_minflt, ac_majflt;
454};
455
456struct cpu_itimer {
457	cputime_t expires;
458	cputime_t incr;
459	u32 error;
460	u32 incr_error;
461};
462
463/**
464 * struct task_cputime - collected CPU time counts
465 * @utime:		time spent in user mode, in &cputime_t units
466 * @stime:		time spent in kernel mode, in &cputime_t units
467 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
468 *
469 * This structure groups together three kinds of CPU time that are
470 * tracked for threads and thread groups.  Most things considering
471 * CPU time want to group these counts together and treat all three
472 * of them in parallel.
473 */
474struct task_cputime {
475	cputime_t utime;
476	cputime_t stime;
477	unsigned long long sum_exec_runtime;
478};
479/* Alternate field names when used to cache expirations. */
480#define prof_exp	stime
481#define virt_exp	utime
482#define sched_exp	sum_exec_runtime
483
484#define INIT_CPUTIME	\
485	(struct task_cputime) {					\
486		.utime = cputime_zero,				\
487		.stime = cputime_zero,				\
488		.sum_exec_runtime = 0,				\
489	}
490
491/*
492 * Disable preemption until the scheduler is running.
493 * Reset by start_kernel()->sched_init()->init_idle().
494 *
495 * We include PREEMPT_ACTIVE to prevent cond_resched() from working
496 * before the scheduler is active -- see should_resched().
497 */
498#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)
499
500/**
501 * struct thread_group_cputimer - thread group interval timer counts
502 * @cputime:		thread group interval timers.
503 * @running:		non-zero when there are timers running and
504 * 			@cputime receives updates.
505 * @lock:		lock for fields in this struct.
506 *
507 * This structure contains the version of task_cputime, above, that is
508 * used for thread group CPU timer calculations.
509 */
510struct thread_group_cputimer {
511	struct task_cputime cputime;
512	int running;
513	spinlock_t lock;
514};
515
516struct autogroup;
517
518/*
519 * NOTE! "signal_struct" does not have its own
520 * locking, because a shared signal_struct always
521 * implies a shared sighand_struct, so locking
522 * sighand_struct is always a proper superset of
523 * the locking of signal_struct.
524 */
525struct signal_struct {
526	atomic_t		sigcnt;
527	atomic_t		live;
528	int			nr_threads;
529
530	wait_queue_head_t	wait_chldexit;	/* for wait4() */
531
532	/* current thread group signal load-balancing target: */
533	struct task_struct	*curr_target;
534
535	/* shared signal handling: */
536	struct sigpending	shared_pending;
537
538	/* thread group exit support */
539	int			group_exit_code;
540	/* overloaded:
541	 * - notify group_exit_task when ->count is equal to notify_count
542	 * - everyone except group_exit_task is stopped during signal delivery
543	 *   of fatal signals, group_exit_task processes the signal.
544	 */
545	int			notify_count;
546	struct task_struct	*group_exit_task;
547
548	/* thread group stop support, overloads group_exit_code too */
549	int			group_stop_count;
550	unsigned int		flags; /* see SIGNAL_* flags below */
551
552	/* POSIX.1b Interval Timers */
553	struct list_head posix_timers;
554
555	/* ITIMER_REAL timer for the process */
556	struct hrtimer real_timer;
557	struct pid *leader_pid;
558	ktime_t it_real_incr;
559
560	/*
561	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
562	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
563	 * values are defined as 0 and 1 respectively
564	 */
565	struct cpu_itimer it[2];
566
567	/*
568	 * Thread group totals for process CPU timers.
569	 * See thread_group_cputimer(), et al, for details.
570	 */
571	struct thread_group_cputimer cputimer;
572
573	/* Earliest-expiration cache. */
574	struct task_cputime cputime_expires;
575
576	struct list_head cpu_timers[3];
577
578	struct pid *tty_old_pgrp;
579
580	/* boolean value for session group leader */
581	int leader;
582
583	struct tty_struct *tty; /* NULL if no tty */
584
585#ifdef CONFIG_SCHED_AUTOGROUP
586	struct autogroup *autogroup;
587#endif
588	/*
589	 * Cumulative resource counters for dead threads in the group,
590	 * and for reaped dead child processes forked by this group.
591	 * Live threads maintain their own counters and add to these
592	 * in __exit_signal, except for the group leader.
593	 */
594	cputime_t utime, stime, cutime, cstime;
595	cputime_t gtime;
596	cputime_t cgtime;
597#ifndef CONFIG_VIRT_CPU_ACCOUNTING
598	cputime_t prev_utime, prev_stime;
599#endif
600	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
601	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
602	unsigned long inblock, oublock, cinblock, coublock;
603	unsigned long maxrss, cmaxrss;
604	struct task_io_accounting ioac;
605
606	/*
607	 * Cumulative ns of scheduled CPU time of dead threads in the
608	 * group, not including a zombie group leader. (This only differs
609	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
610	 * other than jiffies.)
611	 */
612	unsigned long long sum_sched_runtime;
613
614	/*
615	 * We don't bother to synchronize most readers of this at all,
616	 * because there is no reader checking a limit that actually needs
617	 * to get both rlim_cur and rlim_max atomically, and either one
618	 * alone is a single word that can safely be read normally.
619	 * getrlimit/setrlimit use task_lock(current->group_leader) to
620	 * protect this instead of the siglock, because they really
621	 * have no need to disable irqs.
622	 */
623	struct rlimit rlim[RLIM_NLIMITS];
624
625#ifdef CONFIG_BSD_PROCESS_ACCT
626	struct pacct_struct pacct;	/* per-process accounting information */
627#endif
628#ifdef CONFIG_TASKSTATS
629	struct taskstats *stats;
630#endif
631#ifdef CONFIG_AUDIT
632	unsigned audit_tty;
633	struct tty_audit_buf *tty_audit_buf;
634#endif
635
636	int oom_adj;		/* OOM kill score adjustment (bit shift) */
637	int oom_score_adj;	/* OOM kill score adjustment */
638	int oom_score_adj_min;	/* OOM kill score adjustment minimum value.
639				 * Only settable by CAP_SYS_RESOURCE. */
640
641	struct mutex cred_guard_mutex;	/* guard against foreign influences on
642					 * credential calculations
643					 * (notably. ptrace) */
644};
645
646/* Context switch must be unlocked if interrupts are to be enabled */
647#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
648# define __ARCH_WANT_UNLOCKED_CTXSW
649#endif
650
651/*
652 * Bits in flags field of signal_struct.
653 */
654#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
655#define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
656#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
657#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
658/*
659 * Pending notifications to parent.
660 */
661#define SIGNAL_CLD_STOPPED	0x00000010
662#define SIGNAL_CLD_CONTINUED	0x00000020
663#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
664
665#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
666
667/* If true, all threads except ->group_exit_task have pending SIGKILL */
668static inline int signal_group_exit(const struct signal_struct *sig)
669{
670	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
671		(sig->group_exit_task != NULL);
672}
673
674/*
675 * Some day this will be a full-fledged user tracking system.
676 */
677struct user_struct {
678	atomic_t __count;	/* reference count */
679	atomic_t processes;	/* How many processes does this user have? */
680	atomic_t files;		/* How many open files does this user have? */
681	atomic_t sigpending;	/* How many pending signals does this user have? */
682#ifdef CONFIG_INOTIFY_USER
683	atomic_t inotify_watches; /* How many inotify watches does this user have? */
684	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
685#endif
686#ifdef CONFIG_FANOTIFY
687	atomic_t fanotify_listeners;
688#endif
689#ifdef CONFIG_EPOLL
690	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
691#endif
692#ifdef CONFIG_POSIX_MQUEUE
693	/* protected by mq_lock	*/
694	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
695#endif
696	unsigned long locked_shm; /* How many pages of mlocked shm ? */
697
698#ifdef CONFIG_KEYS
699	struct key *uid_keyring;	/* UID specific keyring */
700	struct key *session_keyring;	/* UID's default session keyring */
701#endif
702
703	/* Hash table maintenance information */
704	struct hlist_node uidhash_node;
705	uid_t uid;
706	struct user_namespace *user_ns;
707
708#ifdef CONFIG_PERF_EVENTS
709	atomic_long_t locked_vm;
710#endif
711};
712
713extern int uids_sysfs_init(void);
714
715extern struct user_struct *find_user(uid_t);
716
717extern struct user_struct root_user;
718#define INIT_USER (&root_user)
719
720
721struct backing_dev_info;
722struct reclaim_state;
723
724#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
725struct sched_info {
726	/* cumulative counters */
727	unsigned long pcount;	      /* # of times run on this cpu */
728	unsigned long long run_delay; /* time spent waiting on a runqueue */
729
730	/* timestamps */
731	unsigned long long last_arrival,/* when we last ran on a cpu */
732			   last_queued;	/* when we were last queued to run */
733#ifdef CONFIG_SCHEDSTATS
734	/* BKL stats */
735	unsigned int bkl_count;
736#endif
737};
738#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
739
740#ifdef CONFIG_TASK_DELAY_ACCT
741struct task_delay_info {
742	spinlock_t	lock;
743	unsigned int	flags;	/* Private per-task flags */
744
745	/* For each stat XXX, add the following, aligned appropriately
746	 *
747	 * struct timespec XXX_start, XXX_end;
748	 * u64 XXX_delay;
749	 * u32 XXX_count;
750	 *
751	 * Atomicity of updates to XXX_delay, XXX_count protected by
752	 * single lock above (split into XXX_lock if contention is an issue).
753	 */
754
755	/*
756	 * XXX_count is incremented on every XXX operation, the delay
757	 * associated with the operation is added to XXX_delay.
758	 * XXX_delay contains the accumulated delay time in nanoseconds.
759	 */
760	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
761	u64 blkio_delay;	/* wait for sync block io completion */
762	u64 swapin_delay;	/* wait for swapin block io completion */
763	u32 blkio_count;	/* total count of the number of sync block */
764				/* io operations performed */
765	u32 swapin_count;	/* total count of the number of swapin block */
766				/* io operations performed */
767
768	struct timespec freepages_start, freepages_end;
769	u64 freepages_delay;	/* wait for memory reclaim */
770	u32 freepages_count;	/* total count of memory reclaim */
771};
772#endif	/* CONFIG_TASK_DELAY_ACCT */
773
774static inline int sched_info_on(void)
775{
776#ifdef CONFIG_SCHEDSTATS
777	return 1;
778#elif defined(CONFIG_TASK_DELAY_ACCT)
779	extern int delayacct_on;
780	return delayacct_on;
781#else
782	return 0;
783#endif
784}
785
786enum cpu_idle_type {
787	CPU_IDLE,
788	CPU_NOT_IDLE,
789	CPU_NEWLY_IDLE,
790	CPU_MAX_IDLE_TYPES
791};
792
793/*
794 * sched-domains (multiprocessor balancing) declarations:
795 */
796
797/*
798 * Increase resolution of nice-level calculations:
799 */
800#define SCHED_LOAD_SHIFT	10
801#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
802
803#define SCHED_LOAD_SCALE_FUZZ	SCHED_LOAD_SCALE
804
805#ifdef CONFIG_SMP
806#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
807#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
808#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
809#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
810#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
811#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
812#define SD_PREFER_LOCAL		0x0040  /* Prefer to keep tasks local to this domain */
813#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
814#define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
815#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
816#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
817#define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
818#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
819
820enum powersavings_balance_level {
821	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
822	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
823					 * first for long running threads
824					 */
825	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
826					 * cpu package for power savings
827					 */
828	MAX_POWERSAVINGS_BALANCE_LEVELS
829};
830
831extern int sched_mc_power_savings, sched_smt_power_savings;
832
833static inline int sd_balance_for_mc_power(void)
834{
835	if (sched_smt_power_savings)
836		return SD_POWERSAVINGS_BALANCE;
837
838	if (!sched_mc_power_savings)
839		return SD_PREFER_SIBLING;
840
841	return 0;
842}
843
844static inline int sd_balance_for_package_power(void)
845{
846	if (sched_mc_power_savings | sched_smt_power_savings)
847		return SD_POWERSAVINGS_BALANCE;
848
849	return SD_PREFER_SIBLING;
850}
851
852extern int __weak arch_sd_sibiling_asym_packing(void);
853
854/*
855 * Optimise SD flags for power savings:
856 * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
857 * Keep default SD flags if sched_{smt,mc}_power_saving=0
858 */
859
860static inline int sd_power_saving_flags(void)
861{
862	if (sched_mc_power_savings | sched_smt_power_savings)
863		return SD_BALANCE_NEWIDLE;
864
865	return 0;
866}
867
868struct sched_group {
869	struct sched_group *next;	/* Must be a circular list */
870
871	/*
872	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
873	 * single CPU.
874	 */
875	unsigned int cpu_power, cpu_power_orig;
876	unsigned int group_weight;
877
878	/*
879	 * The CPUs this group covers.
880	 *
881	 * NOTE: this field is variable length. (Allocated dynamically
882	 * by attaching extra space to the end of the structure,
883	 * depending on how many CPUs the kernel has booted up with)
884	 *
885	 * It can also be embedded into static data structures at build
886	 * time. (See 'struct static_sched_group' in kernel/sched.c)
887	 */
888	unsigned long cpumask[0];
889};
890
891static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
892{
893	return to_cpumask(sg->cpumask);
894}
895
896enum sched_domain_level {
897	SD_LV_NONE = 0,
898	SD_LV_SIBLING,
899	SD_LV_MC,
900	SD_LV_BOOK,
901	SD_LV_CPU,
902	SD_LV_NODE,
903	SD_LV_ALLNODES,
904	SD_LV_MAX
905};
906
907struct sched_domain_attr {
908	int relax_domain_level;
909};
910
911#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
912	.relax_domain_level = -1,			\
913}
914
915struct sched_domain {
916	/* These fields must be setup */
917	struct sched_domain *parent;	/* top domain must be null terminated */
918	struct sched_domain *child;	/* bottom domain must be null terminated */
919	struct sched_group *groups;	/* the balancing groups of the domain */
920	unsigned long min_interval;	/* Minimum balance interval ms */
921	unsigned long max_interval;	/* Maximum balance interval ms */
922	unsigned int busy_factor;	/* less balancing by factor if busy */
923	unsigned int imbalance_pct;	/* No balance until over watermark */
924	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
925	unsigned int busy_idx;
926	unsigned int idle_idx;
927	unsigned int newidle_idx;
928	unsigned int wake_idx;
929	unsigned int forkexec_idx;
930	unsigned int smt_gain;
931	int flags;			/* See SD_* */
932	enum sched_domain_level level;
933
934	/* Runtime fields. */
935	unsigned long last_balance;	/* init to jiffies. units in jiffies */
936	unsigned int balance_interval;	/* initialise to 1. units in ms. */
937	unsigned int nr_balance_failed; /* initialise to 0 */
938
939	u64 last_update;
940
941#ifdef CONFIG_SCHEDSTATS
942	/* load_balance() stats */
943	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
944	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
945	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
946	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
947	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
948	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
949	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
950	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
951
952	/* Active load balancing */
953	unsigned int alb_count;
954	unsigned int alb_failed;
955	unsigned int alb_pushed;
956
957	/* SD_BALANCE_EXEC stats */
958	unsigned int sbe_count;
959	unsigned int sbe_balanced;
960	unsigned int sbe_pushed;
961
962	/* SD_BALANCE_FORK stats */
963	unsigned int sbf_count;
964	unsigned int sbf_balanced;
965	unsigned int sbf_pushed;
966
967	/* try_to_wake_up() stats */
968	unsigned int ttwu_wake_remote;
969	unsigned int ttwu_move_affine;
970	unsigned int ttwu_move_balance;
971#endif
972#ifdef CONFIG_SCHED_DEBUG
973	char *name;
974#endif
975
976	unsigned int span_weight;
977	/*
978	 * Span of all CPUs in this domain.
979	 *
980	 * NOTE: this field is variable length. (Allocated dynamically
981	 * by attaching extra space to the end of the structure,
982	 * depending on how many CPUs the kernel has booted up with)
983	 *
984	 * It can also be embedded into static data structures at build
985	 * time. (See 'struct static_sched_domain' in kernel/sched.c)
986	 */
987	unsigned long span[0];
988};
989
990static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
991{
992	return to_cpumask(sd->span);
993}
994
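/*
 * Illustrative sketch (not part of this header): walking the CPUs covered by a
 * domain with the accessor above, given some struct sched_domain *sd, e.g.
 * from load-balancing code:
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, sched_domain_span(sd))
 *		pr_debug("cpu %d belongs to this domain\n", cpu);
 */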
995extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
996				    struct sched_domain_attr *dattr_new);
997
998/* Allocate an array of sched domains, for partition_sched_domains(). */
999cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1000void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1001
1002/* Test a flag in parent sched domain */
1003static inline int test_sd_parent(struct sched_domain *sd, int flag)
1004{
1005	if (sd->parent && (sd->parent->flags & flag))
1006		return 1;
1007
1008	return 0;
1009}
1010
1011unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
1012unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
1013
1014#else /* CONFIG_SMP */
1015
1016struct sched_domain_attr;
1017
1018static inline void
1019partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1020			struct sched_domain_attr *dattr_new)
1021{
1022}
1023#endif	/* !CONFIG_SMP */
1024
1025
1026struct io_context;			/* See blkdev.h */
1027
1028
1029#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1030extern void prefetch_stack(struct task_struct *t);
1031#else
1032static inline void prefetch_stack(struct task_struct *t) { }
1033#endif
1034
1035struct audit_context;		/* See audit.c */
1036struct mempolicy;
1037struct pipe_inode_info;
1038struct uts_namespace;
1039
1040struct rq;
1041struct sched_domain;
1042
1043/*
1044 * wake flags
1045 */
1046#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
1047#define WF_FORK		0x02		/* child wakeup after fork */
1048
1049#define ENQUEUE_WAKEUP		1
1050#define ENQUEUE_WAKING		2
1051#define ENQUEUE_HEAD		4
1052
1053#define DEQUEUE_SLEEP		1
1054
1055struct sched_class {
1056	const struct sched_class *next;
1057
1058	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1059	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1060	void (*yield_task) (struct rq *rq);
1061
1062	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1063
1064	struct task_struct * (*pick_next_task) (struct rq *rq);
1065	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1066
1067#ifdef CONFIG_SMP
1068	int  (*select_task_rq)(struct rq *rq, struct task_struct *p,
1069			       int sd_flag, int flags);
1070
1071	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
1072	void (*post_schedule) (struct rq *this_rq);
1073	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
1074	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1075
1076	void (*set_cpus_allowed)(struct task_struct *p,
1077				 const struct cpumask *newmask);
1078
1079	void (*rq_online)(struct rq *rq);
1080	void (*rq_offline)(struct rq *rq);
1081#endif
1082
1083	void (*set_curr_task) (struct rq *rq);
1084	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1085	void (*task_fork) (struct task_struct *p);
1086
1087	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
1088			       int running);
1089	void (*switched_to) (struct rq *this_rq, struct task_struct *task,
1090			     int running);
1091	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1092			     int oldprio, int running);
1093
1094	unsigned int (*get_rr_interval) (struct rq *rq,
1095					 struct task_struct *task);
1096
1097#ifdef CONFIG_FAIR_GROUP_SCHED
1098	void (*task_move_group) (struct task_struct *p, int on_rq);
1099#endif
1100};
1101
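/*
 * Illustrative sketch (not part of this header): the scheduler classes form a
 * singly linked priority list via ->next (stop, then rt, then fair, then
 * idle), and the core scheduler picks the next task by asking each class in
 * turn; the loop in kernel/sched.c is roughly:
 *
 *	for (class = sched_class_highest; class; class = class->next) {
 *		p = class->pick_next_task(rq);
 *		if (p)
 *			return p;
 *	}
 */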
1102struct load_weight {
1103	unsigned long weight, inv_weight;
1104};
1105
1106#ifdef CONFIG_SCHEDSTATS
1107struct sched_statistics {
1108	u64			wait_start;
1109	u64			wait_max;
1110	u64			wait_count;
1111	u64			wait_sum;
1112	u64			iowait_count;
1113	u64			iowait_sum;
1114
1115	u64			sleep_start;
1116	u64			sleep_max;
1117	s64			sum_sleep_runtime;
1118
1119	u64			block_start;
1120	u64			block_max;
1121	u64			exec_max;
1122	u64			slice_max;
1123
1124	u64			nr_migrations_cold;
1125	u64			nr_failed_migrations_affine;
1126	u64			nr_failed_migrations_running;
1127	u64			nr_failed_migrations_hot;
1128	u64			nr_forced_migrations;
1129
1130	u64			nr_wakeups;
1131	u64			nr_wakeups_sync;
1132	u64			nr_wakeups_migrate;
1133	u64			nr_wakeups_local;
1134	u64			nr_wakeups_remote;
1135	u64			nr_wakeups_affine;
1136	u64			nr_wakeups_affine_attempts;
1137	u64			nr_wakeups_passive;
1138	u64			nr_wakeups_idle;
1139};
1140#endif
1141
1142struct sched_entity {
1143	struct load_weight	load;		/* for load-balancing */
1144	struct rb_node		run_node;
1145	struct list_head	group_node;
1146	unsigned int		on_rq;
1147
1148	u64			exec_start;
1149	u64			sum_exec_runtime;
1150	u64			vruntime;
1151	u64			prev_sum_exec_runtime;
1152
1153	u64			nr_migrations;
1154
1155#ifdef CONFIG_SCHEDSTATS
1156	struct sched_statistics statistics;
1157#endif
1158
1159#ifdef CONFIG_FAIR_GROUP_SCHED
1160	struct sched_entity	*parent;
1161	/* rq on which this entity is (to be) queued: */
1162	struct cfs_rq		*cfs_rq;
1163	/* rq "owned" by this entity/group: */
1164	struct cfs_rq		*my_q;
1165#endif
1166};
1167
1168struct sched_rt_entity {
1169	struct list_head run_list;
1170	unsigned long timeout;
1171	unsigned int time_slice;
1172	int nr_cpus_allowed;
1173
1174	struct sched_rt_entity *back;
1175#ifdef CONFIG_RT_GROUP_SCHED
1176	struct sched_rt_entity	*parent;
1177	/* rq on which this entity is (to be) queued: */
1178	struct rt_rq		*rt_rq;
1179	/* rq "owned" by this entity/group: */
1180	struct rt_rq		*my_q;
1181#endif
1182};
1183
1184struct rcu_node;
1185
1186enum perf_event_task_context {
1187	perf_invalid_context = -1,
1188	perf_hw_context = 0,
1189	perf_sw_context,
1190	perf_nr_task_contexts,
1191};
1192
1193struct task_struct {
1194	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1195	void *stack;
1196	atomic_t usage;
1197	unsigned int flags;	/* per process flags, defined below */
1198	unsigned int ptrace;
1199
1200	int lock_depth;		/* BKL lock depth */
1201
1202#ifdef CONFIG_SMP
1203#ifdef __ARCH_WANT_UNLOCKED_CTXSW
1204	int oncpu;
1205#endif
1206#endif
1207
1208	int prio, static_prio, normal_prio;
1209	unsigned int rt_priority;
1210	const struct sched_class *sched_class;
1211	struct sched_entity se;
1212	struct sched_rt_entity rt;
1213
1214#ifdef CONFIG_PREEMPT_NOTIFIERS
1215	/* list of struct preempt_notifier: */
1216	struct hlist_head preempt_notifiers;
1217#endif
1218
1219	/*
1220	 * fpu_counter contains the number of consecutive context switches
1221	 * during which the FPU is used. If this is over a threshold, the lazy FPU
1222	 * saving becomes unlazy to save the trap. This is an unsigned char
1223	 * so that after 256 times the counter wraps and the behavior turns
1224	 * lazy again; this is to deal with bursty apps that only use the FPU for
1225	 * a short time
1226	 */
1227	unsigned char fpu_counter;
1228#ifdef CONFIG_BLK_DEV_IO_TRACE
1229	unsigned int btrace_seq;
1230#endif
1231
1232	unsigned int policy;
1233	cpumask_t cpus_allowed;
1234
1235#ifdef CONFIG_PREEMPT_RCU
1236	int rcu_read_lock_nesting;
1237	char rcu_read_unlock_special;
1238	struct list_head rcu_node_entry;
1239#endif /* #ifdef CONFIG_PREEMPT_RCU */
1240#ifdef CONFIG_TREE_PREEMPT_RCU
1241	struct rcu_node *rcu_blocked_node;
1242#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1243#ifdef CONFIG_RCU_BOOST
1244	struct rt_mutex *rcu_boost_mutex;
1245#endif /* #ifdef CONFIG_RCU_BOOST */
1246
1247#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1248	struct sched_info sched_info;
1249#endif
1250
1251	struct list_head tasks;
1252#ifdef CONFIG_SMP
1253	struct plist_node pushable_tasks;
1254#endif
1255
1256	struct mm_struct *mm, *active_mm;
1257#if defined(SPLIT_RSS_COUNTING)
1258	struct task_rss_stat	rss_stat;
1259#endif
1260/* task state */
1261	int exit_state;
1262	int exit_code, exit_signal;
1263	int pdeath_signal;  /*  The signal sent when the parent dies  */
1264	/* ??? */
1265	unsigned int personality;
1266	unsigned did_exec:1;
1267	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1268				 * execve */
1269	unsigned in_iowait:1;
1270
1271
1272	/* Revert to default priority/policy when forking */
1273	unsigned sched_reset_on_fork:1;
1274
1275	pid_t pid;
1276	pid_t tgid;
1277
1278#ifdef CONFIG_CC_STACKPROTECTOR
1279	/* Canary value for the -fstack-protector gcc feature */
1280	unsigned long stack_canary;
1281#endif
1282
1283	/*
1284	 * pointers to (original) parent process, youngest child, younger sibling,
1285	 * older sibling, respectively.  (p->father can be replaced with
1286	 * p->real_parent->pid)
1287	 */
1288	struct task_struct *real_parent; /* real parent process */
1289	struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
1290	/*
1291	 * children/sibling forms the list of my natural children
1292	 */
1293	struct list_head children;	/* list of my children */
1294	struct list_head sibling;	/* linkage in my parent's children list */
1295	struct task_struct *group_leader;	/* threadgroup leader */
1296
1297	/*
1298	 * ptraced is the list of tasks this task is using ptrace on.
1299	 * This includes both natural children and PTRACE_ATTACH targets.
1300	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1301	 */
1302	struct list_head ptraced;
1303	struct list_head ptrace_entry;
1304
1305	/* PID/PID hash table linkage. */
1306	struct pid_link pids[PIDTYPE_MAX];
1307	struct list_head thread_group;
1308
1309	struct completion *vfork_done;		/* for vfork() */
1310	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
1311	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
1312
1313	cputime_t utime, stime, utimescaled, stimescaled;
1314	cputime_t gtime;
1315#ifndef CONFIG_VIRT_CPU_ACCOUNTING
1316	cputime_t prev_utime, prev_stime;
1317#endif
1318	unsigned long nvcsw, nivcsw; /* context switch counts */
1319	struct timespec start_time; 		/* monotonic time */
1320	struct timespec real_start_time;	/* boot based time */
1321/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1322	unsigned long min_flt, maj_flt;
1323
1324	struct task_cputime cputime_expires;
1325	struct list_head cpu_timers[3];
1326
1327/* process credentials */
1328	const struct cred __rcu *real_cred; /* objective and real subjective task
1329					 * credentials (COW) */
1330	const struct cred __rcu *cred;	/* effective (overridable) subjective task
1331					 * credentials (COW) */
1332	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
1333
1334	char comm[TASK_COMM_LEN]; /* executable name excluding path
1335				     - access with [gs]et_task_comm (which lock
1336				       it with task_lock())
1337				     - initialized normally by setup_new_exec */
1338/* file system info */
1339	int link_count, total_link_count;
1340#ifdef CONFIG_SYSVIPC
1341/* ipc stuff */
1342	struct sysv_sem sysvsem;
1343#endif
1344#ifdef CONFIG_DETECT_HUNG_TASK
1345/* hung task detection */
1346	unsigned long last_switch_count;
1347#endif
1348/* CPU-specific state of this task */
1349	struct thread_struct thread;
1350/* filesystem information */
1351	struct fs_struct *fs;
1352/* open file information */
1353	struct files_struct *files;
1354/* namespaces */
1355	struct nsproxy *nsproxy;
1356/* signal handlers */
1357	struct signal_struct *signal;
1358	struct sighand_struct *sighand;
1359
1360	sigset_t blocked, real_blocked;
1361	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
1362	struct sigpending pending;
1363
1364	unsigned long sas_ss_sp;
1365	size_t sas_ss_size;
1366	int (*notifier)(void *priv);
1367	void *notifier_data;
1368	sigset_t *notifier_mask;
1369	struct audit_context *audit_context;
1370#ifdef CONFIG_AUDITSYSCALL
1371	uid_t loginuid;
1372	unsigned int sessionid;
1373#endif
1374	seccomp_t seccomp;
1375
1376/* Thread group tracking */
1377   	u32 parent_exec_id;
1378   	u32 self_exec_id;
1379/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1380 * mempolicy */
1381	spinlock_t alloc_lock;
1382
1383#ifdef CONFIG_GENERIC_HARDIRQS
1384	/* IRQ handler threads */
1385	struct irqaction *irqaction;
1386#endif
1387
1388	/* Protection of the PI data structures: */
1389	raw_spinlock_t pi_lock;
1390
1391#ifdef CONFIG_RT_MUTEXES
1392	/* PI waiters blocked on a rt_mutex held by this task */
1393	struct plist_head pi_waiters;
1394	/* Deadlock detection and priority inheritance handling */
1395	struct rt_mutex_waiter *pi_blocked_on;
1396#endif
1397
1398#ifdef CONFIG_DEBUG_MUTEXES
1399	/* mutex deadlock detection */
1400	struct mutex_waiter *blocked_on;
1401#endif
1402#ifdef CONFIG_TRACE_IRQFLAGS
1403	unsigned int irq_events;
1404	unsigned long hardirq_enable_ip;
1405	unsigned long hardirq_disable_ip;
1406	unsigned int hardirq_enable_event;
1407	unsigned int hardirq_disable_event;
1408	int hardirqs_enabled;
1409	int hardirq_context;
1410	unsigned long softirq_disable_ip;
1411	unsigned long softirq_enable_ip;
1412	unsigned int softirq_disable_event;
1413	unsigned int softirq_enable_event;
1414	int softirqs_enabled;
1415	int softirq_context;
1416#endif
1417#ifdef CONFIG_LOCKDEP
1418# define MAX_LOCK_DEPTH 48UL
1419	u64 curr_chain_key;
1420	int lockdep_depth;
1421	unsigned int lockdep_recursion;
1422	struct held_lock held_locks[MAX_LOCK_DEPTH];
1423	gfp_t lockdep_reclaim_gfp;
1424#endif
1425
1426/* journalling filesystem info */
1427	void *journal_info;
1428
1429/* stacked block device info */
1430	struct bio_list *bio_list;
1431
1432/* VM state */
1433	struct reclaim_state *reclaim_state;
1434
1435	struct backing_dev_info *backing_dev_info;
1436
1437	struct io_context *io_context;
1438
1439	unsigned long ptrace_message;
1440	siginfo_t *last_siginfo; /* For ptrace use.  */
1441	struct task_io_accounting ioac;
1442#if defined(CONFIG_TASK_XACCT)
1443	u64 acct_rss_mem1;	/* accumulated rss usage */
1444	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
1445	cputime_t acct_timexpd;	/* stime + utime since last update */
1446#endif
1447#ifdef CONFIG_CPUSETS
1448	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1449	int mems_allowed_change_disable;
1450	int cpuset_mem_spread_rotor;
1451	int cpuset_slab_spread_rotor;
1452#endif
1453#ifdef CONFIG_CGROUPS
1454	/* Control Group info protected by css_set_lock */
1455	struct css_set __rcu *cgroups;
1456	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1457	struct list_head cg_list;
1458#endif
1459#ifdef CONFIG_FUTEX
1460	struct robust_list_head __user *robust_list;
1461#ifdef CONFIG_COMPAT
1462	struct compat_robust_list_head __user *compat_robust_list;
1463#endif
1464	struct list_head pi_state_list;
1465	struct futex_pi_state *pi_state_cache;
1466#endif
1467#ifdef CONFIG_PERF_EVENTS
1468	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1469	struct mutex perf_event_mutex;
1470	struct list_head perf_event_list;
1471#endif
1472#ifdef CONFIG_NUMA
1473	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1474	short il_next;
1475#endif
1476	atomic_t fs_excl;	/* holding fs exclusive resources */
1477	struct rcu_head rcu;
1478
1479	/*
1480	 * cache last used pipe for splice
1481	 */
1482	struct pipe_inode_info *splice_pipe;
1483#ifdef	CONFIG_TASK_DELAY_ACCT
1484	struct task_delay_info *delays;
1485#endif
1486#ifdef CONFIG_FAULT_INJECTION
1487	int make_it_fail;
1488#endif
1489	struct prop_local_single dirties;
1490#ifdef CONFIG_LATENCYTOP
1491	int latency_record_count;
1492	struct latency_record latency_record[LT_SAVECOUNT];
1493#endif
1494	/*
1495	 * time slack values; these are used to round up poll() and
1496	 * select() etc timeout values. These are in nanoseconds.
1497	 */
1498	unsigned long timer_slack_ns;
1499	unsigned long default_timer_slack_ns;
1500
1501	struct list_head	*scm_work_list;
1502#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1503	/* Index of current stored address in ret_stack */
1504	int curr_ret_stack;
1505	/* Stack of return addresses for return function tracing */
1506	struct ftrace_ret_stack	*ret_stack;
1507	/* time stamp for last schedule */
1508	unsigned long long ftrace_timestamp;
1509	/*
1510	 * Number of functions that haven't been traced
1511	 * because of depth overrun.
1512	 */
1513	atomic_t trace_overrun;
1514	/* Pause for the tracing */
1515	atomic_t tracing_graph_pause;
1516#endif
1517#ifdef CONFIG_TRACING
1518	/* state flags for use by tracers */
1519	unsigned long trace;
1520	/* bitmask of trace recursion */
1521	unsigned long trace_recursion;
1522#endif /* CONFIG_TRACING */
1523#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
1524	struct memcg_batch_info {
1525		int do_batch;	/* incremented when batch uncharge started */
1526		struct mem_cgroup *memcg; /* target memcg of uncharge */
1527		unsigned long bytes; 		/* uncharged usage */
1528		unsigned long memsw_bytes; /* uncharged mem+swap usage */
1529	} memcg_batch;
1530#endif
1531};
1532
1533/* Future-safe accessor for struct task_struct's cpus_allowed. */
1534#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1535
1536/*
1537 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
1538 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
1539 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
1540 * values are inverted: lower p->prio value means higher priority.
1541 *
1542 * The MAX_USER_RT_PRIO value allows the actual maximum
1543 * RT priority to be separate from the value exported to
1544 * user-space.  This allows kernel threads to set their
1545 * priority to a value higher than any user task. Note:
1546 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
1547 */
1548
1549#define MAX_USER_RT_PRIO	100
1550#define MAX_RT_PRIO		MAX_USER_RT_PRIO
1551
1552#define MAX_PRIO		(MAX_RT_PRIO + 40)
1553#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
1554
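/*
 * Illustrative note: with MAX_RT_PRIO = 100 this gives MAX_PRIO = 140 and
 * DEFAULT_PRIO = 120. Nice levels -20..19 of SCHED_NORMAL/SCHED_BATCH tasks
 * map onto p->static_prio values 100..139, so a nice-0 task sits at the
 * default of 120, while real-time priorities occupy 0..99 below that.
 */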
1555static inline int rt_prio(int prio)
1556{
1557	if (unlikely(prio < MAX_RT_PRIO))
1558		return 1;
1559	return 0;
1560}
1561
1562static inline int rt_task(struct task_struct *p)
1563{
1564	return rt_prio(p->prio);
1565}
1566
1567static inline struct pid *task_pid(struct task_struct *task)
1568{
1569	return task->pids[PIDTYPE_PID].pid;
1570}
1571
1572static inline struct pid *task_tgid(struct task_struct *task)
1573{
1574	return task->group_leader->pids[PIDTYPE_PID].pid;
1575}
1576
1577/*
1578 * Without tasklist or rcu lock it is not safe to dereference
1579 * the result of task_pgrp/task_session even if task == current,
1580 * we can race with another thread doing sys_setsid/sys_setpgid.
1581 */
1582static inline struct pid *task_pgrp(struct task_struct *task)
1583{
1584	return task->group_leader->pids[PIDTYPE_PGID].pid;
1585}
1586
1587static inline struct pid *task_session(struct task_struct *task)
1588{
1589	return task->group_leader->pids[PIDTYPE_SID].pid;
1590}
1591
1592struct pid_namespace;
1593
1594/*
1595 * the helpers to get the task's different pids as they are seen
1596 * from various namespaces
1597 *
1598 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1599 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1600 *                     current.
1601 * task_xid_nr_ns()  : id seen from the ns specified;
1602 *
1603 * set_task_vxid()   : assigns a virtual id to a task;
1604 *
1605 * see also pid_nr() etc in include/linux/pid.h
1606 */
1607pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1608			struct pid_namespace *ns);
1609
1610static inline pid_t task_pid_nr(struct task_struct *tsk)
1611{
1612	return tsk->pid;
1613}
1614
1615static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1616					struct pid_namespace *ns)
1617{
1618	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1619}
1620
1621static inline pid_t task_pid_vnr(struct task_struct *tsk)
1622{
1623	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1624}
1625
1626
1627static inline pid_t task_tgid_nr(struct task_struct *tsk)
1628{
1629	return tsk->tgid;
1630}
1631
1632pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1633
1634static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1635{
1636	return pid_vnr(task_tgid(tsk));
1637}
1638
1639
1640static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1641					struct pid_namespace *ns)
1642{
1643	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1644}
1645
1646static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1647{
1648	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1649}
1650
1651
1652static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1653					struct pid_namespace *ns)
1654{
1655	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1656}
1657
1658static inline pid_t task_session_vnr(struct task_struct *tsk)
1659{
1660	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1661}
1662
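/*
 * Illustrative sketch (not part of this header): the same task can be known by
 * different pid values depending on the observer's pid namespace, e.g.:
 *
 *	struct task_struct *tsk = current;
 *
 *	pr_info("global pid %d, pid as seen from my namespace %d\n",
 *		task_pid_nr(tsk), task_pid_vnr(tsk));
 */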
1663/* obsolete, do not use */
1664static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1665{
1666	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1667}
1668
1669/**
1670 * pid_alive - check that a task structure is not stale
1671 * @p: Task structure to be checked.
1672 *
1673 * Test if a process is not yet dead (at most zombie state).
1674 * If pid_alive fails, then pointers within the task structure
1675 * can be stale and must not be dereferenced.
1676 */
1677static inline int pid_alive(struct task_struct *p)
1678{
1679	return p->pids[PIDTYPE_PID].pid != NULL;
1680}
1681
1682/**
1683 * is_global_init - check if a task structure is init
1684 * @tsk: Task structure to be checked.
1685 *
1686 * Check if a task structure is the first user space task the kernel created.
1687 */
1688static inline int is_global_init(struct task_struct *tsk)
1689{
1690	return tsk->pid == 1;
1691}
1692
1693/*
1694 * is_container_init:
1695 * check whether the task is init in its own pid namespace.
1696 */
1697extern int is_container_init(struct task_struct *tsk);
1698
1699extern struct pid *cad_pid;
1700
1701extern void free_task(struct task_struct *tsk);
1702#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1703
1704extern void __put_task_struct(struct task_struct *t);
1705
1706static inline void put_task_struct(struct task_struct *t)
1707{
1708	if (atomic_dec_and_test(&t->usage))
1709		__put_task_struct(t);
1710}
1711
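/*
 * Illustrative sketch (not part of this header): the usual reference-counting
 * pattern around the helpers above, for code that needs a task pointer to stay
 * valid after dropping whatever lock or RCU section made the lookup safe:
 *
 *	get_task_struct(p);
 *	...
 *	put_task_struct(p);
 */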
1712extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1713extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1714
1715/*
1716 * Per process flags
1717 */
1718#define PF_KSOFTIRQD	0x00000001	/* I am ksoftirqd */
1719#define PF_STARTING	0x00000002	/* being created */
1720#define PF_EXITING	0x00000004	/* getting shut down */
1721#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
1722#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
1723#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
1724#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
1725#define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
1726#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
1727#define PF_DUMPCORE	0x00000200	/* dumped core */
1728#define PF_SIGNALED	0x00000400	/* killed by a signal */
1729#define PF_MEMALLOC	0x00000800	/* Allocating memory */
1730#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
1731#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
1732#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
1733#define PF_FROZEN	0x00010000	/* frozen for system suspend */
1734#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
1735#define PF_KSWAPD	0x00040000	/* I am kswapd */
1736#define PF_OOM_ORIGIN	0x00080000	/* Allocating much memory to others */
1737#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1738#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1739#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1740#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1741#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
1742#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
1743#define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
1744#define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1745#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
1746#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
1747#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
1748#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
1749
1750/*
1751 * Only the _current_ task can read/write to tsk->flags, but other
1752 * tasks can access tsk->flags in readonly mode, for example
1753 * with tsk_used_math() (as during threaded core dumping).
1754 * There is, however, an exception to this rule during ptrace
1755 * or during fork: the ptracer task is allowed to write to the
1756 * child->flags of its traced child (same goes for fork, the parent
1757 * can write to the child->flags), because we're guaranteed the
1758 * child is not running and in turn not changing child->flags
1759 * at the same time the parent does it.
1760 */
1761#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1762#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1763#define clear_used_math() clear_stopped_child_used_math(current)
1764#define set_used_math() set_stopped_child_used_math(current)
1765#define conditional_stopped_child_used_math(condition, child) \
1766	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1767#define conditional_used_math(condition) \
1768	conditional_stopped_child_used_math(condition, current)
1769#define copy_to_stopped_child_used_math(child) \
1770	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1771/* NOTE: this will return 0 or PF_USED_MATH; it will never return 1 */
1772#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1773#define used_math() tsk_used_math(current)
1774
1775#ifdef CONFIG_PREEMPT_RCU
1776
1777#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1778#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
1779#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
1780
1781static inline void rcu_copy_process(struct task_struct *p)
1782{
1783	p->rcu_read_lock_nesting = 0;
1784	p->rcu_read_unlock_special = 0;
1785#ifdef CONFIG_TREE_PREEMPT_RCU
1786	p->rcu_blocked_node = NULL;
1787#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1788#ifdef CONFIG_RCU_BOOST
1789	p->rcu_boost_mutex = NULL;
1790#endif /* #ifdef CONFIG_RCU_BOOST */
1791	INIT_LIST_HEAD(&p->rcu_node_entry);
1792}
1793
1794#else
1795
1796static inline void rcu_copy_process(struct task_struct *p)
1797{
1798}
1799
1800#endif
1801
1802#ifdef CONFIG_SMP
1803extern int set_cpus_allowed_ptr(struct task_struct *p,
1804				const struct cpumask *new_mask);
1805#else
1806static inline int set_cpus_allowed_ptr(struct task_struct *p,
1807				       const struct cpumask *new_mask)
1808{
1809	if (!cpumask_test_cpu(0, new_mask))
1810		return -EINVAL;
1811	return 0;
1812}
1813#endif
1814
1815#ifndef CONFIG_CPUMASK_OFFSTACK
1816static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1817{
1818	return set_cpus_allowed_ptr(p, &new_mask);
1819}
1820#endif
1821
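/*
 * Illustrative sketch (assumed usage, not mandated by this header): a
 * kernel thread that should run only on CPU "cpu" can restrict its
 * affinity with the cpumask helpers from <linux/cpumask.h>:
 *
 *	if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
 *		printk(KERN_WARNING "could not bind to CPU %d\n", cpu);
 *
 * On !SMP builds the inline stub above only accepts masks that contain
 * CPU 0, so the call degenerates to a sanity check.
 */
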
1822/*
1823 * Do not use outside of architecture code which knows its limitations.
1824 *
1825 * sched_clock() has no promise of monotonicity or bounded drift between
1826 * CPUs; using it (which you should not) requires disabling IRQs.
1827 *
1828 * Please use one of the three interfaces below.
1829 */
1830extern unsigned long long notrace sched_clock(void);
1831/*
1832 * See the comment in kernel/sched_clock.c
1833 */
1834extern u64 cpu_clock(int cpu);
1835extern u64 local_clock(void);
1836extern u64 sched_clock_cpu(int cpu);
1837
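/*
 * Illustrative sketch (not part of this header's API): measuring a rough
 * duration in nanoseconds with the per-cpu clock.  The two samples may be
 * taken on different CPUs, so treat the result as an estimate rather than
 * a precise interval:
 *
 *	u64 t0, delta;
 *
 *	t0 = local_clock();
 *	do_something();			(placeholder for the code being timed)
 *	delta = local_clock() - t0;	(approximate nanoseconds)
 */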
1838
1839extern void sched_clock_init(void);
1840
1841#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1842static inline void sched_clock_tick(void)
1843{
1844}
1845
1846static inline void sched_clock_idle_sleep_event(void)
1847{
1848}
1849
1850static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
1851{
1852}
1853#else
1854/*
1855 * Architectures can set this to 1 if they have selected
1856 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
1857 * but during bootup it turns out that sched_clock()
1858 * is reliable after all:
1859 */
1860extern int sched_clock_stable;
1861
1862extern void sched_clock_tick(void);
1863extern void sched_clock_idle_sleep_event(void);
1864extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1865#endif
1866
1867#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1868/*
1869 * An interface for runtime opt-in to irq time accounting, based on sched_clock.
1870 * The opt-in is explicit so that slow sched_clock() implementations do not
1871 * pay the accounting overhead unconditionally.
1872 */
1873extern void enable_sched_clock_irqtime(void);
1874extern void disable_sched_clock_irqtime(void);
1875#else
1876static inline void enable_sched_clock_irqtime(void) {}
1877static inline void disable_sched_clock_irqtime(void) {}
1878#endif
1879
1880extern unsigned long long
1881task_sched_runtime(struct task_struct *task);
1882extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
1883
1884/* sched_exec is called by processes performing an exec */
1885#ifdef CONFIG_SMP
1886extern void sched_exec(void);
1887#else
1888#define sched_exec()   {}
1889#endif
1890
1891extern void sched_clock_idle_sleep_event(void);
1892extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1893
1894#ifdef CONFIG_HOTPLUG_CPU
1895extern void idle_task_exit(void);
1896#else
1897static inline void idle_task_exit(void) {}
1898#endif
1899
1900#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
1901extern void wake_up_idle_cpu(int cpu);
1902#else
1903static inline void wake_up_idle_cpu(int cpu) { }
1904#endif
1905
1906extern unsigned int sysctl_sched_latency;
1907extern unsigned int sysctl_sched_min_granularity;
1908extern unsigned int sysctl_sched_wakeup_granularity;
1909extern unsigned int sysctl_sched_child_runs_first;
1910
1911enum sched_tunable_scaling {
1912	SCHED_TUNABLESCALING_NONE,
1913	SCHED_TUNABLESCALING_LOG,
1914	SCHED_TUNABLESCALING_LINEAR,
1915	SCHED_TUNABLESCALING_END,
1916};
1917extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
1918
1919#ifdef CONFIG_SCHED_DEBUG
1920extern unsigned int sysctl_sched_migration_cost;
1921extern unsigned int sysctl_sched_nr_migrate;
1922extern unsigned int sysctl_sched_time_avg;
1923extern unsigned int sysctl_timer_migration;
1924extern unsigned int sysctl_sched_shares_window;
1925
1926int sched_proc_update_handler(struct ctl_table *table, int write,
1927		void __user *buffer, size_t *length,
1928		loff_t *ppos);
1929#endif
1930#ifdef CONFIG_SCHED_DEBUG
1931static inline unsigned int get_sysctl_timer_migration(void)
1932{
1933	return sysctl_timer_migration;
1934}
1935#else
1936static inline unsigned int get_sysctl_timer_migration(void)
1937{
1938	return 1;
1939}
1940#endif
1941extern unsigned int sysctl_sched_rt_period;
1942extern int sysctl_sched_rt_runtime;
1943
1944int sched_rt_handler(struct ctl_table *table, int write,
1945		void __user *buffer, size_t *lenp,
1946		loff_t *ppos);
1947
1948extern unsigned int sysctl_sched_compat_yield;
1949
1950#ifdef CONFIG_SCHED_AUTOGROUP
1951extern unsigned int sysctl_sched_autogroup_enabled;
1952
1953extern void sched_autogroup_create_attach(struct task_struct *p);
1954extern void sched_autogroup_detach(struct task_struct *p);
1955extern void sched_autogroup_fork(struct signal_struct *sig);
1956extern void sched_autogroup_exit(struct signal_struct *sig);
1957#ifdef CONFIG_PROC_FS
1958extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
1959extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
1960#endif
1961#else
1962static inline void sched_autogroup_create_attach(struct task_struct *p) { }
1963static inline void sched_autogroup_detach(struct task_struct *p) { }
1964static inline void sched_autogroup_fork(struct signal_struct *sig) { }
1965static inline void sched_autogroup_exit(struct signal_struct *sig) { }
1966#endif
1967
1968#ifdef CONFIG_RT_MUTEXES
1969extern int rt_mutex_getprio(struct task_struct *p);
1970extern void rt_mutex_setprio(struct task_struct *p, int prio);
1971extern void rt_mutex_adjust_pi(struct task_struct *p);
1972#else
1973static inline int rt_mutex_getprio(struct task_struct *p)
1974{
1975	return p->normal_prio;
1976}
1977# define rt_mutex_adjust_pi(p)		do { } while (0)
1978#endif
1979
1980extern void set_user_nice(struct task_struct *p, long nice);
1981extern int task_prio(const struct task_struct *p);
1982extern int task_nice(const struct task_struct *p);
1983extern int can_nice(const struct task_struct *p, const int nice);
1984extern int task_curr(const struct task_struct *p);
1985extern int idle_cpu(int cpu);
1986extern int sched_setscheduler(struct task_struct *, int,
1987			      const struct sched_param *);
1988extern int sched_setscheduler_nocheck(struct task_struct *, int,
1989				      const struct sched_param *);
1990extern struct task_struct *idle_task(int cpu);
1991extern struct task_struct *curr_task(int cpu);
1992extern void set_curr_task(int cpu, struct task_struct *p);
1993
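/*
 * Illustrative sketch (assumed usage, not mandated by this header):
 * switching a task to a real-time policy.  The priority value 50 below
 * is an arbitrary example, not a recommendation:
 *
 *	struct sched_param param = { .sched_priority = 50 };
 *
 *	if (sched_setscheduler(p, SCHED_FIFO, &param))
 *		printk(KERN_WARNING "failed to set SCHED_FIFO\n");
 *
 * sched_setscheduler_nocheck() performs the same operation but skips the
 * permission checks; it is meant for kernel-internal callers.
 */
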
1994void yield(void);
1995
1996/*
1997 * The default (Linux) execution domain.
1998 */
1999extern struct exec_domain	default_exec_domain;
2000
2001union thread_union {
2002	struct thread_info thread_info;
2003	unsigned long stack[THREAD_SIZE/sizeof(long)];
2004};
2005
2006#ifndef __HAVE_ARCH_KSTACK_END
2007static inline int kstack_end(void *addr)
2008{
2009	/* Reliable end of stack detection:
2010	 * Some APM bios versions misalign the stack
2011	 * Some APM BIOS versions misalign the stack
2012	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2013}
2014#endif
2015
2016extern union thread_union init_thread_union;
2017extern struct task_struct init_task;
2018
2019extern struct   mm_struct init_mm;
2020
2021extern struct pid_namespace init_pid_ns;
2022
2023/*
2024 * find a task by one of its numerical ids
2025 *
2026 * find_task_by_pid_ns():
2027 *      finds a task by its pid in the specified namespace
2028 * find_task_by_vpid():
2029 *      finds a task by its virtual pid
2030 *
2031 * see also find_vpid() etc in include/linux/pid.h
2032 */
2033
2034extern struct task_struct *find_task_by_vpid(pid_t nr);
2035extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2036		struct pid_namespace *ns);
2037
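/*
 * Illustrative sketch (not part of this header's API): the lookup is
 * normally done under rcu_read_lock(), and the result must be pinned
 * with get_task_struct() if it is used after the RCU section ends:
 *
 *	struct task_struct *tsk;
 *
 *	rcu_read_lock();
 *	tsk = find_task_by_vpid(nr);
 *	if (tsk)
 *		get_task_struct(tsk);
 *	rcu_read_unlock();
 *	...
 *	if (tsk)
 *		put_task_struct(tsk);
 */
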
2038extern void __set_special_pids(struct pid *pid);
2039
2040/* per-UID process charging. */
2041extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
2042static inline struct user_struct *get_uid(struct user_struct *u)
2043{
2044	atomic_inc(&u->__count);
2045	return u;
2046}
2047extern void free_uid(struct user_struct *);
2048extern void release_uids(struct user_namespace *ns);
2049
2050#include <asm/current.h>
2051
2052extern void do_timer(unsigned long ticks);
2053extern void xtime_update(unsigned long ticks);
2054
2055extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2056extern int wake_up_process(struct task_struct *tsk);
2057extern void wake_up_new_task(struct task_struct *tsk,
2058				unsigned long clone_flags);
2059#ifdef CONFIG_SMP
2060 extern void kick_process(struct task_struct *tsk);
2061#else
2062 static inline void kick_process(struct task_struct *tsk) { }
2063#endif
2064extern void sched_fork(struct task_struct *p, int clone_flags);
2065extern void sched_dead(struct task_struct *p);
2066
2067extern void proc_caches_init(void);
2068extern void flush_signals(struct task_struct *);
2069extern void __flush_signals(struct task_struct *);
2070extern void ignore_signals(struct task_struct *);
2071extern void flush_signal_handlers(struct task_struct *, int force_default);
2072extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2073
2074static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2075{
2076	unsigned long flags;
2077	int ret;
2078
2079	spin_lock_irqsave(&tsk->sighand->siglock, flags);
2080	ret = dequeue_signal(tsk, mask, info);
2081	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2082
2083	return ret;
2084}
2085
2086extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2087			      sigset_t *mask);
2088extern void unblock_all_signals(void);
2089extern void release_task(struct task_struct * p);
2090extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2091extern int force_sigsegv(int, struct task_struct *);
2092extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2093extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2094extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2095extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
2096extern int kill_pgrp(struct pid *pid, int sig, int priv);
2097extern int kill_pid(struct pid *pid, int sig, int priv);
2098extern int kill_proc_info(int, struct siginfo *, pid_t);
2099extern int do_notify_parent(struct task_struct *, int);
2100extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2101extern void force_sig(int, struct task_struct *);
2102extern int send_sig(int, struct task_struct *, int);
2103extern int zap_other_threads(struct task_struct *p);
2104extern struct sigqueue *sigqueue_alloc(void);
2105extern void sigqueue_free(struct sigqueue *);
2106extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
2107extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2108extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
2109
2110static inline int kill_cad_pid(int sig, int priv)
2111{
2112	return kill_pid(cad_pid, sig, priv);
2113}
2114
2115/* These can be the second arg to send_sig_info/send_group_sig_info.  */
2116#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2117#define SEND_SIG_PRIV	((struct siginfo *) 1)
2118#define SEND_SIG_FORCED	((struct siginfo *) 2)
2119
2120/*
2121 * True if we are on the alternate signal stack.
2122 */
2123static inline int on_sig_stack(unsigned long sp)
2124{
2125#ifdef CONFIG_STACK_GROWSUP
2126	return sp >= current->sas_ss_sp &&
2127		sp - current->sas_ss_sp < current->sas_ss_size;
2128#else
2129	return sp > current->sas_ss_sp &&
2130		sp - current->sas_ss_sp <= current->sas_ss_size;
2131#endif
2132}
2133
2134static inline int sas_ss_flags(unsigned long sp)
2135{
2136	return (current->sas_ss_size == 0 ? SS_DISABLE
2137		: on_sig_stack(sp) ? SS_ONSTACK : 0);
2138}
2139
2140/*
2141 * Routines for handling mm_structs
2142 */
2143extern struct mm_struct * mm_alloc(void);
2144
2145/* mmdrop drops the mm and the page tables */
2146extern void __mmdrop(struct mm_struct *);
2147static inline void mmdrop(struct mm_struct * mm)
2148{
2149	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2150		__mmdrop(mm);
2151}
2152
2153/* mmput gets rid of the mappings and all user-space */
2154extern void mmput(struct mm_struct *);
2155/* Grab a reference to a task's mm, if it is not already going away */
2156extern struct mm_struct *get_task_mm(struct task_struct *task);
2157/* Remove the current task's stale references to the old mm_struct */
2158extern void mm_release(struct task_struct *, struct mm_struct *);
2159/* Allocate a new mm structure and copy contents from tsk->mm */
2160extern struct mm_struct *dup_mm(struct task_struct *tsk);
2161
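/*
 * Illustrative sketch (not part of this header's API): get_task_mm()
 * returns NULL for kernel threads and for tasks whose mm is already
 * going away, so the result must be checked and later released with
 * mmput():
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		...			(mm cannot be torn down here)
 *		mmput(mm);
 *	}
 */
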
2162extern int copy_thread(unsigned long, unsigned long, unsigned long,
2163			struct task_struct *, struct pt_regs *);
2164extern void flush_thread(void);
2165extern void exit_thread(void);
2166
2167extern void exit_files(struct task_struct *);
2168extern void __cleanup_sighand(struct sighand_struct *);
2169
2170extern void exit_itimers(struct signal_struct *);
2171extern void flush_itimer_signals(void);
2172
2173extern NORET_TYPE void do_group_exit(int);
2174
2175extern void daemonize(const char *, ...);
2176extern int allow_signal(int);
2177extern int disallow_signal(int);
2178
2179extern int do_execve(const char *,
2180		     const char __user * const __user *,
2181		     const char __user * const __user *, struct pt_regs *);
2182extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
2183struct task_struct *fork_idle(int);
2184
2185extern void set_task_comm(struct task_struct *tsk, char *from);
2186extern char *get_task_comm(char *to, struct task_struct *tsk);
2187
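/*
 * Illustrative sketch (not part of this header's API): ->comm is
 * protected by task_lock(), so read it through the accessor rather
 * than dereferencing the field directly:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 *	printk(KERN_DEBUG "task is %s\n", comm);
 */
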
2188#ifdef CONFIG_SMP
2189extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2190#else
2191static inline unsigned long wait_task_inactive(struct task_struct *p,
2192					       long match_state)
2193{
2194	return 1;
2195}
2196#endif
2197
2198#define next_task(p) \
2199	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2200
2201#define for_each_process(p) \
2202	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2203
2204extern bool current_is_single_threaded(void);
2205
2206/*
2207 * Careful: do_each_thread/while_each_thread is a double loop so
2208 *          'break' will not work as expected - use goto instead.
2209 */
2210#define do_each_thread(g, t) \
2211	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2212
2213#define while_each_thread(g, t) \
2214	while ((t = next_thread(t)) != g)
2215
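/*
 * Illustrative sketch (not part of this header's API): walking every
 * thread of every process, typically under read_lock(&tasklist_lock):
 *
 *	struct task_struct *g, *t;
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		examine(t);		(placeholder for the caller's work)
 *	} while_each_thread(g, t);
 *	read_unlock(&tasklist_lock);
 *
 * As noted above, 'break' does not leave the double loop; use goto.
 */
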
2216static inline int get_nr_threads(struct task_struct *tsk)
2217{
2218	return tsk->signal->nr_threads;
2219}
2220
2221/* de_thread depends on thread_group_leader not being a pid based check */
2222#define thread_group_leader(p)	(p == p->group_leader)
2223
2224/* Due to the insanities of de_thread it is possible for a process
2225 * to have the pid of the thread group leader without actually being
2226 * the thread group leader.  For iteration through the pids in proc
2227 * all we care about is that we have a task with the appropriate
2228 * pid; we don't actually care if we have the right task.
2229 */
2230static inline int has_group_leader_pid(struct task_struct *p)
2231{
2232	return p->pid == p->tgid;
2233}
2234
2235static inline
2236int same_thread_group(struct task_struct *p1, struct task_struct *p2)
2237{
2238	return p1->tgid == p2->tgid;
2239}
2240
2241static inline struct task_struct *next_thread(const struct task_struct *p)
2242{
2243	return list_entry_rcu(p->thread_group.next,
2244			      struct task_struct, thread_group);
2245}
2246
2247static inline int thread_group_empty(struct task_struct *p)
2248{
2249	return list_empty(&p->thread_group);
2250}
2251
2252#define delay_group_leader(p) \
2253		(thread_group_leader(p) && !thread_group_empty(p))
2254
2255static inline int task_detached(struct task_struct *p)
2256{
2257	return p->exit_signal == -1;
2258}
2259
2260/*
2261 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2262 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2263 * pins the final release of task.io_context.  Also protects ->cpuset and
2264 * ->cgroup.subsys[].
2265 *
2266 * Nests both inside and outside of read_lock(&tasklist_lock).
2267 * It must not be nested with write_lock_irq(&tasklist_lock),
2268 * neither inside nor outside.
2269 */
2270static inline void task_lock(struct task_struct *p)
2271{
2272	spin_lock(&p->alloc_lock);
2273}
2274
2275static inline void task_unlock(struct task_struct *p)
2276{
2277	spin_unlock(&p->alloc_lock);
2278}
2279
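/*
 * Illustrative sketch (not part of this header's API): task_lock() is
 * what stabilizes the pointers listed above, e.g. when peeking at
 * another task's ->mm without taking a reference to it:
 *
 *	task_lock(p);
 *	if (p->mm)
 *		...			(p->mm cannot change under us here)
 *	task_unlock(p);
 */
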
2280extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2281							unsigned long *flags);
2282
2283#define lock_task_sighand(tsk, flags)					\
2284({	struct sighand_struct *__ss;					\
2285	__cond_lock(&(tsk)->sighand->siglock,				\
2286		    (__ss = __lock_task_sighand(tsk, flags)));		\
2287	__ss;								\
2288})
2289
2290static inline void unlock_task_sighand(struct task_struct *tsk,
2291						unsigned long *flags)
2292{
2293	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2294}
2295
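/*
 * Illustrative sketch (not part of this header's API): ->sighand can go
 * away if the task exits, so lock_task_sighand() may fail and its result
 * must be checked:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(tsk, &flags)) {
 *		...			(signal state is stable here)
 *		unlock_task_sighand(tsk, &flags);
 *	}
 */
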
2296#ifndef __HAVE_THREAD_FUNCTIONS
2297
2298#define task_thread_info(task)	((struct thread_info *)(task)->stack)
2299#define task_stack_page(task)	((task)->stack)
2300
2301static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2302{
2303	*task_thread_info(p) = *task_thread_info(org);
2304	task_thread_info(p)->task = p;
2305}
2306
2307static inline unsigned long *end_of_stack(struct task_struct *p)
2308{
2309	return (unsigned long *)(task_thread_info(p) + 1);
2310}
2311
2312#endif
2313
2314static inline int object_is_on_stack(void *obj)
2315{
2316	void *stack = task_stack_page(current);
2317
2318	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2319}
2320
2321extern void thread_info_cache_init(void);
2322
2323#ifdef CONFIG_DEBUG_STACK_USAGE
2324static inline unsigned long stack_not_used(struct task_struct *p)
2325{
2326	unsigned long *n = end_of_stack(p);
2327
2328	do { 	/* Skip over canary */
2329		n++;
2330	} while (!*n);
2331
2332	return (unsigned long)n - (unsigned long)end_of_stack(p);
2333}
2334#endif
2335
2336/* Set thread flags in another task's structure
2337 * - see asm/thread_info.h for TIF_xxxx flags available
2338 */
2339static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2340{
2341	set_ti_thread_flag(task_thread_info(tsk), flag);
2342}
2343
2344static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2345{
2346	clear_ti_thread_flag(task_thread_info(tsk), flag);
2347}
2348
2349static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2350{
2351	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2352}
2353
2354static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2355{
2356	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2357}
2358
2359static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2360{
2361	return test_ti_thread_flag(task_thread_info(tsk), flag);
2362}
2363
2364static inline void set_tsk_need_resched(struct task_struct *tsk)
2365{
2366	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2367}
2368
2369static inline void clear_tsk_need_resched(struct task_struct *tsk)
2370{
2371	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2372}
2373
2374static inline int test_tsk_need_resched(struct task_struct *tsk)
2375{
2376	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2377}
2378
2379static inline int restart_syscall(void)
2380{
2381	set_tsk_thread_flag(current, TIF_SIGPENDING);
2382	return -ERESTARTNOINTR;
2383}
2384
2385static inline int signal_pending(struct task_struct *p)
2386{
2387	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2388}
2389
2390static inline int __fatal_signal_pending(struct task_struct *p)
2391{
2392	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2393}
2394
2395static inline int fatal_signal_pending(struct task_struct *p)
2396{
2397	return signal_pending(p) && __fatal_signal_pending(p);
2398}
2399
2400static inline int signal_pending_state(long state, struct task_struct *p)
2401{
2402	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2403		return 0;
2404	if (!signal_pending(p))
2405		return 0;
2406
2407	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2408}
2409
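/*
 * Illustrative sketch (not part of this header's API): a polling wait
 * loop that gives up when a signal is pending, letting the caller
 * restart or fail the syscall.  'condition' is a placeholder; real code
 * would usually prefer the wait_event_interruptible() helpers:
 *
 *	while (!condition) {
 *		if (signal_pending(current))
 *			return -ERESTARTSYS;
 *		schedule_timeout_interruptible(HZ / 10);
 *	}
 */
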
2410static inline int need_resched(void)
2411{
2412	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
2413}
2414
2415/*
2416 * cond_resched() and cond_resched_lock(): latency reduction via
2417 * explicit rescheduling in places that are safe. The return
2418 * value indicates whether a reschedule was done in fact.
2419 * cond_resched_lock() will drop the spinlock before scheduling,
2420 * cond_resched_softirq() will enable bhs before scheduling.
2421 */
2422extern int _cond_resched(void);
2423
2424#define cond_resched() ({			\
2425	__might_sleep(__FILE__, __LINE__, 0);	\
2426	_cond_resched();			\
2427})
2428
2429extern int __cond_resched_lock(spinlock_t *lock);
2430
2431#ifdef CONFIG_PREEMPT
2432#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
2433#else
2434#define PREEMPT_LOCK_OFFSET	0
2435#endif
2436
2437#define cond_resched_lock(lock) ({				\
2438	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
2439	__cond_resched_lock(lock);				\
2440})
2441
2442extern int __cond_resched_softirq(void);
2443
2444#define cond_resched_softirq() ({					\
2445	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2446	__cond_resched_softirq();					\
2447})
2448
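/*
 * Illustrative sketch (not part of this header's API): long-running
 * loops in process context should offer to reschedule so they do not
 * hog the CPU on !CONFIG_PREEMPT kernels:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);	(placeholder for the caller's work)
 *		cond_resched();
 *	}
 */
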
2449/*
2450 * Does a critical section need to be broken due to another
2451 * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
2452 * but it reflects a general need for low latency.)
2453 */
2454static inline int spin_needbreak(spinlock_t *lock)
2455{
2456#ifdef CONFIG_PREEMPT
2457	return spin_is_contended(lock);
2458#else
2459	return 0;
2460#endif
2461}
2462
2463/*
2464 * Thread group CPU time accounting.
2465 */
2466void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2467void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2468
2469static inline void thread_group_cputime_init(struct signal_struct *sig)
2470{
2471	spin_lock_init(&sig->cputimer.lock);
2472}
2473
2474/*
2475 * Reevaluate whether the task has signals pending delivery.
2476 * Wake the task if so.
2477 * This is required every time the blocked sigset_t changes.
2478 * Callers must hold sighand->siglock.
2479 */
2480extern void recalc_sigpending_and_wake(struct task_struct *t);
2481extern void recalc_sigpending(void);
2482
2483extern void signal_wake_up(struct task_struct *t, int resume_stopped);
2484
2485/*
2486 * Wrappers for p->thread_info->cpu access. No-op on UP.
2487 */
2488#ifdef CONFIG_SMP
2489
2490static inline unsigned int task_cpu(const struct task_struct *p)
2491{
2492	return task_thread_info(p)->cpu;
2493}
2494
2495extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2496
2497#else
2498
2499static inline unsigned int task_cpu(const struct task_struct *p)
2500{
2501	return 0;
2502}
2503
2504static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2505{
2506}
2507
2508#endif /* CONFIG_SMP */
2509
2510extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2511extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2512
2513extern void normalize_rt_tasks(void);
2514
2515#ifdef CONFIG_CGROUP_SCHED
2516
2517extern struct task_group root_task_group;
2518
2519extern struct task_group *sched_create_group(struct task_group *parent);
2520extern void sched_destroy_group(struct task_group *tg);
2521extern void sched_move_task(struct task_struct *tsk);
2522#ifdef CONFIG_FAIR_GROUP_SCHED
2523extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
2524extern unsigned long sched_group_shares(struct task_group *tg);
2525#endif
2526#ifdef CONFIG_RT_GROUP_SCHED
2527extern int sched_group_set_rt_runtime(struct task_group *tg,
2528				      long rt_runtime_us);
2529extern long sched_group_rt_runtime(struct task_group *tg);
2530extern int sched_group_set_rt_period(struct task_group *tg,
2531				      long rt_period_us);
2532extern long sched_group_rt_period(struct task_group *tg);
2533extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
2534#endif
2535#endif
2536
2537extern int task_can_switch_user(struct user_struct *up,
2538					struct task_struct *tsk);
2539
2540#ifdef CONFIG_TASK_XACCT
2541static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2542{
2543	tsk->ioac.rchar += amt;
2544}
2545
2546static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2547{
2548	tsk->ioac.wchar += amt;
2549}
2550
2551static inline void inc_syscr(struct task_struct *tsk)
2552{
2553	tsk->ioac.syscr++;
2554}
2555
2556static inline void inc_syscw(struct task_struct *tsk)
2557{
2558	tsk->ioac.syscw++;
2559}
2560#else
2561static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2562{
2563}
2564
2565static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2566{
2567}
2568
2569static inline void inc_syscr(struct task_struct *tsk)
2570{
2571}
2572
2573static inline void inc_syscw(struct task_struct *tsk)
2574{
2575}
2576#endif
2577
2578#ifndef TASK_SIZE_OF
2579#define TASK_SIZE_OF(tsk)	TASK_SIZE
2580#endif
2581
2582/*
2583 * Call the function if the target task is executing on a CPU right now:
2584 */
2585extern void task_oncpu_function_call(struct task_struct *p,
2586				     void (*func) (void *info), void *info);
2587
2588
2589#ifdef CONFIG_MM_OWNER
2590extern void mm_update_next_owner(struct mm_struct *mm);
2591extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2592#else
2593static inline void mm_update_next_owner(struct mm_struct *mm)
2594{
2595}
2596
2597static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2598{
2599}
2600#endif /* CONFIG_MM_OWNER */
2601
2602static inline unsigned long task_rlimit(const struct task_struct *tsk,
2603		unsigned int limit)
2604{
2605	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
2606}
2607
2608static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
2609		unsigned int limit)
2610{
2611	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
2612}
2613
2614static inline unsigned long rlimit(unsigned int limit)
2615{
2616	return task_rlimit(current, limit);
2617}
2618
2619static inline unsigned long rlimit_max(unsigned int limit)
2620{
2621	return task_rlimit_max(current, limit);
2622}
2623
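/*
 * Illustrative sketch (not part of this header's API): reading the
 * current task's soft limit, here the open-file limit from
 * <linux/resource.h>.  nr_open is a placeholder for the caller's count:
 *
 *	unsigned long max_files = rlimit(RLIMIT_NOFILE);
 *
 *	if (nr_open >= max_files)
 *		return -EMFILE;
 */
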
2624#endif /* __KERNEL__ */
2625
2626#endif
2627