sched.h revision 1d3504fcf5606579d60b649d19f44b3871c1ddae
1#ifndef _LINUX_SCHED_H
2#define _LINUX_SCHED_H
3
4/*
5 * cloning flags:
6 */
7#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
8#define CLONE_VM	0x00000100	/* set if VM shared between processes */
9#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
10#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
11#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
12#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
13#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
14#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
15#define CLONE_THREAD	0x00010000	/* Same thread group? */
16#define CLONE_NEWNS	0x00020000	/* New namespace group? */
17#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
18#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
19#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
20#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
21#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
22#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
23#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
24#define CLONE_STOPPED		0x02000000	/* Start in stopped state */
25#define CLONE_NEWUTS		0x04000000	/* New utsname group? */
26#define CLONE_NEWIPC		0x08000000	/* New ipcs */
27#define CLONE_NEWUSER		0x10000000	/* New user namespace */
28#define CLONE_NEWPID		0x20000000	/* New pid namespace */
29#define CLONE_NEWNET		0x40000000	/* New network namespace */
30#define CLONE_IO		0x80000000	/* Clone io context */
31
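/*
 * Illustrative sketch (not part of this header): roughly the flag set a
 * pthread-style thread creation passes to clone().  The exact combination is
 * C-library policy; this only shows how the bits above compose.  Kept inside
 * "#if 0" so it does not affect the build.
 */
#if 0
#define EXAMPLE_THREAD_CLONE_FLAGS					\
	(CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |		\
	 CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |			\
	 CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID)
#endif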
32/*
33 * Scheduling policies
34 */
35#define SCHED_NORMAL		0
36#define SCHED_FIFO		1
37#define SCHED_RR		2
38#define SCHED_BATCH		3
39/* SCHED_ISO: reserved but not implemented yet */
40#define SCHED_IDLE		5
41
42#ifdef __KERNEL__
43
44struct sched_param {
45	int sched_priority;
46};
47
48#include <asm/param.h>	/* for HZ */
49
50#include <linux/capability.h>
51#include <linux/threads.h>
52#include <linux/kernel.h>
53#include <linux/types.h>
54#include <linux/timex.h>
55#include <linux/jiffies.h>
56#include <linux/rbtree.h>
57#include <linux/thread_info.h>
58#include <linux/cpumask.h>
59#include <linux/errno.h>
60#include <linux/nodemask.h>
61#include <linux/mm_types.h>
62
63#include <asm/system.h>
64#include <asm/semaphore.h>
65#include <asm/page.h>
66#include <asm/ptrace.h>
67#include <asm/cputime.h>
68
69#include <linux/smp.h>
70#include <linux/sem.h>
71#include <linux/signal.h>
72#include <linux/securebits.h>
73#include <linux/fs_struct.h>
74#include <linux/compiler.h>
75#include <linux/completion.h>
76#include <linux/pid.h>
77#include <linux/percpu.h>
78#include <linux/topology.h>
79#include <linux/proportions.h>
80#include <linux/seccomp.h>
81#include <linux/rcupdate.h>
82#include <linux/rtmutex.h>
83
84#include <linux/time.h>
85#include <linux/param.h>
86#include <linux/resource.h>
87#include <linux/timer.h>
88#include <linux/hrtimer.h>
89#include <linux/task_io_accounting.h>
90#include <linux/kobject.h>
91#include <linux/latencytop.h>
92
93#include <asm/processor.h>
94
95struct mem_cgroup;
96struct exec_domain;
97struct futex_pi_state;
98struct robust_list_head;
99struct bio;
100
101/*
102 * List of flags we want to share for kernel threads,
103 * if only because they are not used by them anyway.
104 */
105#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
106
107/*
108 * These are the constants used to fake the fixed-point load-average
109 * counting. Some notes:
110 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
111 *    a load-average precision of 10 bits integer + 11 bits fractional
112 *  - if you want to count load-averages more often, you need more
113 *    precision, or rounding will get you. With 2-second counting freq,
114 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
115 *    11 bit fractions.
116 */
117extern unsigned long avenrun[];		/* Load averages */
118
119#define FSHIFT		11		/* nr of bits of precision */
120#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
121#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
122#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
123#define EXP_5		2014		/* 1/exp(5sec/5min) */
124#define EXP_15		2037		/* 1/exp(5sec/15min) */
125
126#define CALC_LOAD(load,exp,n) \
127	load *= exp; \
128	load += n*(FIXED_1-exp); \
129	load >>= FSHIFT;
130
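/*
 * Illustrative sketch (not part of this header): one update step of the
 * fixed-point load average and the conversion back to the familiar "X.YY"
 * form.  The EXAMPLE_* names are made up for the example; the real update
 * lives in the timer code.  Kept inside "#if 0" so it does not affect the
 * build.
 */
#if 0
static unsigned long example_avenrun_step(unsigned long avg,
					  unsigned long nr_active_tasks)
{
	/* n is the current task count, scaled up to fixed-point */
	CALC_LOAD(avg, EXP_1, nr_active_tasks * FIXED_1);
	return avg;
}

/* split a fixed-point average into its integer part and two decimals */
#define EXAMPLE_LOAD_INT(x)	((x) >> FSHIFT)
#define EXAMPLE_LOAD_FRAC(x)	EXAMPLE_LOAD_INT(((x) & (FIXED_1 - 1)) * 100)
#endif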
131extern unsigned long total_forks;
132extern int nr_threads;
133DECLARE_PER_CPU(unsigned long, process_counts);
134extern int nr_processes(void);
135extern unsigned long nr_running(void);
136extern unsigned long nr_uninterruptible(void);
137extern unsigned long nr_active(void);
138extern unsigned long nr_iowait(void);
139extern unsigned long weighted_cpuload(const int cpu);
140
141struct seq_file;
142struct cfs_rq;
143struct task_group;
144#ifdef CONFIG_SCHED_DEBUG
145extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
146extern void proc_sched_set_task(struct task_struct *p);
147extern void
148print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
149#else
150static inline void
151proc_sched_show_task(struct task_struct *p, struct seq_file *m)
152{
153}
154static inline void proc_sched_set_task(struct task_struct *p)
155{
156}
157static inline void
158print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
159{
160}
161#endif
162
163/*
164 * Task state bitmask. NOTE! These bits are also
165 * encoded in fs/proc/array.c: get_task_state().
166 *
167 * We have two separate sets of flags: task->state
168 * is about runnability, while task->exit_state are
169 * about the task exiting. Confusing, but this way
170 * modifying one set can't modify the other one by
171 * mistake.
172 */
173#define TASK_RUNNING		0
174#define TASK_INTERRUPTIBLE	1
175#define TASK_UNINTERRUPTIBLE	2
176#define __TASK_STOPPED		4
177#define __TASK_TRACED		8
178/* in tsk->exit_state */
179#define EXIT_ZOMBIE		16
180#define EXIT_DEAD		32
181/* in tsk->state again */
182#define TASK_DEAD		64
183#define TASK_WAKEKILL		128
184
185/* Convenience macros for the sake of set_task_state */
186#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
187#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
188#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
189
190/* Convenience macros for the sake of wake_up */
191#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
192#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
193
194/* get_task_state() */
195#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
196				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
197				 __TASK_TRACED)
198
199#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
200#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
201#define task_is_stopped_or_traced(task)	\
202			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
203#define task_contributes_to_load(task)	\
204				((task->state & TASK_UNINTERRUPTIBLE) != 0)
205
206#define __set_task_state(tsk, state_value)		\
207	do { (tsk)->state = (state_value); } while (0)
208#define set_task_state(tsk, state_value)		\
209	set_mb((tsk)->state, (state_value))
210
211/*
212 * set_current_state() includes a barrier so that the write of current->state
213 * is correctly serialised wrt the caller's subsequent test of whether to
214 * actually sleep:
215 *
216 *	set_current_state(TASK_UNINTERRUPTIBLE);
217 *	if (do_i_need_to_sleep())
218 *		schedule();
219 *
220 * If the caller does not need such serialisation then use __set_current_state()
221 */
222#define __set_current_state(state_value)			\
223	do { current->state = (state_value); } while (0)
224#define set_current_state(state_value)		\
225	set_mb(current->state, (state_value))
226
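/*
 * Illustrative sketch (not part of this header): the canonical sleep loop the
 * comment above describes.  "condition" stands for whatever the caller waits
 * for; the wait-queue bookkeeping is omitted for brevity.  Kept inside
 * "#if 0" so it does not affect the build.
 */
#if 0
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (condition)
			break;		/* no need to sleep after all */
		schedule();		/* really go to sleep */
	}
	__set_current_state(TASK_RUNNING);
#endif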
227/* Task command name length */
228#define TASK_COMM_LEN 16
229
230#include <linux/spinlock.h>
231
232/*
233 * This serializes "schedule()" and also protects
234 * the run-queue from deletions/modifications (but
235 * _adding_ to the beginning of the run-queue has
236 * a separate lock).
237 */
238extern rwlock_t tasklist_lock;
239extern spinlock_t mmlist_lock;
240
241struct task_struct;
242
243extern void sched_init(void);
244extern void sched_init_smp(void);
245extern asmlinkage void schedule_tail(struct task_struct *prev);
246extern void init_idle(struct task_struct *idle, int cpu);
247extern void init_idle_bootup_task(struct task_struct *idle);
248
249extern cpumask_t nohz_cpu_mask;
250#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
251extern int select_nohz_load_balancer(int cpu);
252#else
253static inline int select_nohz_load_balancer(int cpu)
254{
255	return 0;
256}
257#endif
258
259extern unsigned long rt_needs_cpu(int cpu);
260
261/*
262 * Only dump TASK_* tasks. (0 for all tasks)
263 */
264extern void show_state_filter(unsigned long state_filter);
265
266static inline void show_state(void)
267{
268	show_state_filter(0);
269}
270
271extern void show_regs(struct pt_regs *);
272
273/*
274 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
275 * task), SP is the stack pointer of the first frame that should be shown in the back
276 * trace (or NULL if the entire call-chain of the task should be shown).
277 */
278extern void show_stack(struct task_struct *task, unsigned long *sp);
279
280void io_schedule(void);
281long io_schedule_timeout(long timeout);
282
283extern void cpu_init (void);
284extern void trap_init(void);
285extern void account_process_tick(struct task_struct *task, int user);
286extern void update_process_times(int user);
287extern void scheduler_tick(void);
288extern void hrtick_resched(void);
289
290extern void sched_show_task(struct task_struct *p);
291
292#ifdef CONFIG_DETECT_SOFTLOCKUP
293extern void softlockup_tick(void);
294extern void spawn_softlockup_task(void);
295extern void touch_softlockup_watchdog(void);
296extern void touch_all_softlockup_watchdogs(void);
297extern unsigned long  softlockup_thresh;
298extern unsigned long sysctl_hung_task_check_count;
299extern unsigned long sysctl_hung_task_timeout_secs;
300extern unsigned long sysctl_hung_task_warnings;
301#else
302static inline void softlockup_tick(void)
303{
304}
305static inline void spawn_softlockup_task(void)
306{
307}
308static inline void touch_softlockup_watchdog(void)
309{
310}
311static inline void touch_all_softlockup_watchdogs(void)
312{
313}
314#endif
315
316
317/* Attach to any functions which should be ignored in wchan output. */
318#define __sched		__attribute__((__section__(".sched.text")))
319
320/* Linker adds these: start and end of __sched functions */
321extern char __sched_text_start[], __sched_text_end[];
322
323/* Is this address in the __sched functions? */
324extern int in_sched_functions(unsigned long addr);
325
326#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
327extern signed long schedule_timeout(signed long timeout);
328extern signed long schedule_timeout_interruptible(signed long timeout);
329extern signed long schedule_timeout_killable(signed long timeout);
330extern signed long schedule_timeout_uninterruptible(signed long timeout);
331asmlinkage void schedule(void);
332
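/*
 * Illustrative sketch (not part of this header): a relative delay built from
 * the helpers above; msecs_to_jiffies() comes from <linux/jiffies.h>.  Kept
 * inside "#if 0" so it does not affect the build.
 */
#if 0
	/* block for roughly 100ms, ignoring signals */
	schedule_timeout_uninterruptible(msecs_to_jiffies(100));
#endif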
333struct nsproxy;
334struct user_namespace;
335
336/* Maximum number of active map areas. This is a random (large) number */
337#define DEFAULT_MAX_MAP_COUNT	65536
338
339extern int sysctl_max_map_count;
340
341#include <linux/aio.h>
342
343extern unsigned long
344arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
345		       unsigned long, unsigned long);
346extern unsigned long
347arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
348			  unsigned long len, unsigned long pgoff,
349			  unsigned long flags);
350extern void arch_unmap_area(struct mm_struct *, unsigned long);
351extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
352
353#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
354/*
355 * The mm counters are not protected by the mm's page_table_lock,
356 * so they must be incremented atomically.
357 */
358#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
359#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
360#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
361#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
362#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
363
364#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
365/*
366 * The mm counters are protected by the mm's page_table_lock,
367 * so they can be incremented directly.
368 */
369#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
370#define get_mm_counter(mm, member) ((mm)->_##member)
371#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
372#define inc_mm_counter(mm, member) (mm)->_##member++
373#define dec_mm_counter(mm, member) (mm)->_##member--
374
375#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
376
377#define get_mm_rss(mm)					\
378	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
379#define update_hiwater_rss(mm)	do {			\
380	unsigned long _rss = get_mm_rss(mm);		\
381	if ((mm)->hiwater_rss < _rss)			\
382		(mm)->hiwater_rss = _rss;		\
383} while (0)
384#define update_hiwater_vm(mm)	do {			\
385	if ((mm)->hiwater_vm < (mm)->total_vm)		\
386		(mm)->hiwater_vm = (mm)->total_vm;	\
387} while (0)
388
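/*
 * Illustrative sketch (not part of this header): one way the RSS helpers
 * above combine, e.g. after mapping a new anonymous page into "mm".  Kept
 * inside "#if 0" so it does not affect the build.
 */
#if 0
	inc_mm_counter(mm, anon_rss);	/* one more anonymous page */
	update_hiwater_rss(mm);		/* remember the RSS high-water mark */
	printk(KERN_DEBUG "rss is now %lu pages\n", get_mm_rss(mm));
#endif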
389extern void set_dumpable(struct mm_struct *mm, int value);
390extern int get_dumpable(struct mm_struct *mm);
391
392/* mm flags */
393/* dumpable bits */
394#define MMF_DUMPABLE      0  /* core dump is permitted */
395#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
396#define MMF_DUMPABLE_BITS 2
397
398/* coredump filter bits */
399#define MMF_DUMP_ANON_PRIVATE	2
400#define MMF_DUMP_ANON_SHARED	3
401#define MMF_DUMP_MAPPED_PRIVATE	4
402#define MMF_DUMP_MAPPED_SHARED	5
403#define MMF_DUMP_ELF_HEADERS	6
404#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
405#define MMF_DUMP_FILTER_BITS	5
406#define MMF_DUMP_FILTER_MASK \
407	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
408#define MMF_DUMP_FILTER_DEFAULT \
409	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED))
410
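/*
 * Illustrative sketch (not part of this header): how a coredump filter bit in
 * an mm's flag word would be tested.  The helper name is made up; the real
 * decision logic lives in the core-dump code.  Kept inside "#if 0" so it does
 * not affect the build.
 */
#if 0
static inline int example_dump_anon_shared(unsigned long mm_flags)
{
	return (mm_flags & (1UL << MMF_DUMP_ANON_SHARED)) != 0;
}
#endif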
411struct sighand_struct {
412	atomic_t		count;
413	struct k_sigaction	action[_NSIG];
414	spinlock_t		siglock;
415	wait_queue_head_t	signalfd_wqh;
416};
417
418struct pacct_struct {
419	int			ac_flag;
420	long			ac_exitcode;
421	unsigned long		ac_mem;
422	cputime_t		ac_utime, ac_stime;
423	unsigned long		ac_minflt, ac_majflt;
424};
425
426/*
427 * NOTE! "signal_struct" does not have its own
428 * locking, because a shared signal_struct always
429 * implies a shared sighand_struct, so locking
430 * sighand_struct is always a proper superset of
431 * the locking of signal_struct.
432 */
433struct signal_struct {
434	atomic_t		count;
435	atomic_t		live;
436
437	wait_queue_head_t	wait_chldexit;	/* for wait4() */
438
439	/* current thread group signal load-balancing target: */
440	struct task_struct	*curr_target;
441
442	/* shared signal handling: */
443	struct sigpending	shared_pending;
444
445	/* thread group exit support */
446	int			group_exit_code;
447	/* overloaded:
448	 * - notify group_exit_task when ->count is equal to notify_count
449	 * - everyone except group_exit_task is stopped during signal delivery
450	 *   of fatal signals, group_exit_task processes the signal.
451	 */
452	struct task_struct	*group_exit_task;
453	int			notify_count;
454
455	/* thread group stop support, overloads group_exit_code too */
456	int			group_stop_count;
457	unsigned int		flags; /* see SIGNAL_* flags below */
458
459	/* POSIX.1b Interval Timers */
460	struct list_head posix_timers;
461
462	/* ITIMER_REAL timer for the process */
463	struct hrtimer real_timer;
464	struct pid *leader_pid;
465	ktime_t it_real_incr;
466
467	/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
468	cputime_t it_prof_expires, it_virt_expires;
469	cputime_t it_prof_incr, it_virt_incr;
470
471	/* job control IDs */
472
473	/*
474	 * pgrp and session fields are deprecated.
475	 * use the task_session_Xnr and task_pgrp_Xnr routines below
476	 */
477
478	union {
479		pid_t pgrp __deprecated;
480		pid_t __pgrp;
481	};
482
483	struct pid *tty_old_pgrp;
484
485	union {
486		pid_t session __deprecated;
487		pid_t __session;
488	};
489
490	/* boolean value for session group leader */
491	int leader;
492
493	struct tty_struct *tty; /* NULL if no tty */
494
495	/*
496	 * Cumulative resource counters for dead threads in the group,
497	 * and for reaped dead child processes forked by this group.
498	 * Live threads maintain their own counters and add to these
499	 * in __exit_signal, except for the group leader.
500	 */
501	cputime_t utime, stime, cutime, cstime;
502	cputime_t gtime;
503	cputime_t cgtime;
504	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
505	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
506	unsigned long inblock, oublock, cinblock, coublock;
507
508	/*
509	 * Cumulative ns of scheduled CPU time for dead threads in the
510	 * group, not including a zombie group leader.  (This only differs
511	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
512	 * other than jiffies.)
513	 */
514	unsigned long long sum_sched_runtime;
515
516	/*
517	 * We don't bother to synchronize most readers of this at all,
518	 * because there is no reader checking a limit that actually needs
519	 * to get both rlim_cur and rlim_max atomically, and either one
520	 * alone is a single word that can safely be read normally.
521	 * getrlimit/setrlimit use task_lock(current->group_leader) to
522	 * protect this instead of the siglock, because they really
523	 * have no need to disable irqs.
524	 */
525	struct rlimit rlim[RLIM_NLIMITS];
526
527	struct list_head cpu_timers[3];
528
529	/* keep the process-shared keyrings here so that they do the right
530	 * thing in threads created with CLONE_THREAD */
531#ifdef CONFIG_KEYS
532	struct key *session_keyring;	/* keyring inherited over fork */
533	struct key *process_keyring;	/* keyring private to this process */
534#endif
535#ifdef CONFIG_BSD_PROCESS_ACCT
536	struct pacct_struct pacct;	/* per-process accounting information */
537#endif
538#ifdef CONFIG_TASKSTATS
539	struct taskstats *stats;
540#endif
541#ifdef CONFIG_AUDIT
542	unsigned audit_tty;
543	struct tty_audit_buf *tty_audit_buf;
544#endif
545};
546
547/* Context switch must be unlocked if interrupts are to be enabled */
548#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
549# define __ARCH_WANT_UNLOCKED_CTXSW
550#endif
551
552/*
553 * Bits in flags field of signal_struct.
554 */
555#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
556#define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
557#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
558#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
559
560/* If true, all threads except ->group_exit_task have pending SIGKILL */
561static inline int signal_group_exit(const struct signal_struct *sig)
562{
563	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
564		(sig->group_exit_task != NULL);
565}
566
567/*
568 * Some day this will be a full-fledged user tracking system.
569 */
570struct user_struct {
571	atomic_t __count;	/* reference count */
572	atomic_t processes;	/* How many processes does this user have? */
573	atomic_t files;		/* How many open files does this user have? */
574	atomic_t sigpending;	/* How many pending signals does this user have? */
575#ifdef CONFIG_INOTIFY_USER
576	atomic_t inotify_watches; /* How many inotify watches does this user have? */
577	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
578#endif
579#ifdef CONFIG_POSIX_MQUEUE
580	/* protected by mq_lock	*/
581	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
582#endif
583	unsigned long locked_shm; /* How many pages of mlocked shm ? */
584
585#ifdef CONFIG_KEYS
586	struct key *uid_keyring;	/* UID specific keyring */
587	struct key *session_keyring;	/* UID's default session keyring */
588#endif
589
590	/* Hash table maintenance information */
591	struct hlist_node uidhash_node;
592	uid_t uid;
593
594#ifdef CONFIG_USER_SCHED
595	struct task_group *tg;
596#ifdef CONFIG_SYSFS
597	struct kobject kobj;
598	struct work_struct work;
599#endif
600#endif
601};
602
603extern int uids_sysfs_init(void);
604
605extern struct user_struct *find_user(uid_t);
606
607extern struct user_struct root_user;
608#define INIT_USER (&root_user)
609
610struct backing_dev_info;
611struct reclaim_state;
612
613#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
614struct sched_info {
615	/* cumulative counters */
616	unsigned long pcount;	      /* # of times run on this cpu */
617	unsigned long long cpu_time,  /* time spent on the cpu */
618			   run_delay; /* time spent waiting on a runqueue */
619
620	/* timestamps */
621	unsigned long long last_arrival,/* when we last ran on a cpu */
622			   last_queued;	/* when we were last queued to run */
623#ifdef CONFIG_SCHEDSTATS
624	/* BKL stats */
625	unsigned int bkl_count;
626#endif
627};
628#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
629
630#ifdef CONFIG_SCHEDSTATS
631extern const struct file_operations proc_schedstat_operations;
632#endif /* CONFIG_SCHEDSTATS */
633
634#ifdef CONFIG_TASK_DELAY_ACCT
635struct task_delay_info {
636	spinlock_t	lock;
637	unsigned int	flags;	/* Private per-task flags */
638
639	/* For each stat XXX, add following, aligned appropriately
640	/* For each stat XXX, add the following, aligned appropriately
641	 * struct timespec XXX_start, XXX_end;
642	 * u64 XXX_delay;
643	 * u32 XXX_count;
644	 *
645	 * Atomicity of updates to XXX_delay, XXX_count protected by
646	 * single lock above (split into XXX_lock if contention is an issue).
647	 */
648
649	/*
650	 * XXX_count is incremented on every XXX operation, the delay
651	 * associated with the operation is added to XXX_delay.
652	 * XXX_delay contains the accumulated delay time in nanoseconds.
653	 */
654	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
655	u64 blkio_delay;	/* wait for sync block io completion */
656	u64 swapin_delay;	/* wait for swapin block io completion */
657	u32 blkio_count;	/* total count of the number of sync block */
658				/* io operations performed */
659	u32 swapin_count;	/* total count of the number of swapin block */
660				/* io operations performed */
661};
662#endif	/* CONFIG_TASK_DELAY_ACCT */
663
664static inline int sched_info_on(void)
665{
666#ifdef CONFIG_SCHEDSTATS
667	return 1;
668#elif defined(CONFIG_TASK_DELAY_ACCT)
669	extern int delayacct_on;
670	return delayacct_on;
671#else
672	return 0;
673#endif
674}
675
676enum cpu_idle_type {
677	CPU_IDLE,
678	CPU_NOT_IDLE,
679	CPU_NEWLY_IDLE,
680	CPU_MAX_IDLE_TYPES
681};
682
683/*
684 * sched-domains (multiprocessor balancing) declarations:
685 */
686
687/*
688 * Increase resolution of nice-level calculations:
689 */
690#define SCHED_LOAD_SHIFT	10
691#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
692
693#define SCHED_LOAD_SCALE_FUZZ	SCHED_LOAD_SCALE
694
695#ifdef CONFIG_SMP
696#define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
697#define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
698#define SD_BALANCE_EXEC		4	/* Balance on exec */
699#define SD_BALANCE_FORK		8	/* Balance on fork, clone */
700#define SD_WAKE_IDLE		16	/* Wake to idle CPU on task wakeup */
701#define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
702#define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
703#define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
704#define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
705#define SD_SHARE_PKG_RESOURCES	512	/* Domain members share cpu pkg resources */
706#define SD_SERIALIZE		1024	/* Only a single load balancing instance */
707#define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */
708
709#define BALANCE_FOR_MC_POWER	\
710	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
711
712#define BALANCE_FOR_PKG_POWER	\
713	((sched_mc_power_savings || sched_smt_power_savings) ?	\
714	 SD_POWERSAVINGS_BALANCE : 0)
715
716#define test_sd_parent(sd, flag)	((sd->parent &&		\
717					 (sd->parent->flags & flag)) ? 1 : 0)
718
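/*
 * Illustrative sketch (not part of this header): how the SD_* flags above are
 * typically combined when an architecture describes one topology level
 * (compare the SD_*_INIT templates in <linux/topology.h>).  The exact set is
 * architecture policy; this one is only an example.  Kept inside "#if 0" so
 * it does not affect the build.
 */
#if 0
#define EXAMPLE_SD_SIBLING_FLAGS					\
	(SD_LOAD_BALANCE | SD_BALANCE_NEWIDLE | SD_BALANCE_EXEC |	\
	 SD_WAKE_AFFINE | SD_WAKE_IDLE | SD_SHARE_CPUPOWER)
#endif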
719
720struct sched_group {
721	struct sched_group *next;	/* Must be a circular list */
722	cpumask_t cpumask;
723
724	/*
725	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
726	 * single CPU. This is read only (except for setup, hotplug CPU).
727	 * Note: Never change cpu_power without recomputing its reciprocal
728	 */
729	unsigned int __cpu_power;
730	/*
731	 * reciprocal value of cpu_power to avoid expensive divides
732	 * (see include/linux/reciprocal_div.h)
733	 */
734	u32 reciprocal_cpu_power;
735};
736
737enum sched_domain_level {
738	SD_LV_NONE = 0,
739	SD_LV_SIBLING,
740	SD_LV_MC,
741	SD_LV_CPU,
742	SD_LV_NODE,
743	SD_LV_ALLNODES,
744	SD_LV_MAX
745};
746
747struct sched_domain_attr {
748	int relax_domain_level;
749};
750
751#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
752	.relax_domain_level = -1,			\
753}
754
755struct sched_domain {
756	/* These fields must be setup */
757	struct sched_domain *parent;	/* top domain must be null terminated */
758	struct sched_domain *child;	/* bottom domain must be null terminated */
759	struct sched_group *groups;	/* the balancing groups of the domain */
760	cpumask_t span;			/* span of all CPUs in this domain */
761	unsigned long min_interval;	/* Minimum balance interval ms */
762	unsigned long max_interval;	/* Maximum balance interval ms */
763	unsigned int busy_factor;	/* less balancing by factor if busy */
764	unsigned int imbalance_pct;	/* No balance until over watermark */
765	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
766	unsigned int busy_idx;
767	unsigned int idle_idx;
768	unsigned int newidle_idx;
769	unsigned int wake_idx;
770	unsigned int forkexec_idx;
771	int flags;			/* See SD_* */
772	enum sched_domain_level level;
773
774	/* Runtime fields. */
775	unsigned long last_balance;	/* init to jiffies. units in jiffies */
776	unsigned int balance_interval;	/* initialise to 1. units in ms. */
777	unsigned int nr_balance_failed; /* initialise to 0 */
778
779#ifdef CONFIG_SCHEDSTATS
780	/* load_balance() stats */
781	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
782	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
783	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
784	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
785	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
786	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
787	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
788	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
789
790	/* Active load balancing */
791	unsigned int alb_count;
792	unsigned int alb_failed;
793	unsigned int alb_pushed;
794
795	/* SD_BALANCE_EXEC stats */
796	unsigned int sbe_count;
797	unsigned int sbe_balanced;
798	unsigned int sbe_pushed;
799
800	/* SD_BALANCE_FORK stats */
801	unsigned int sbf_count;
802	unsigned int sbf_balanced;
803	unsigned int sbf_pushed;
804
805	/* try_to_wake_up() stats */
806	unsigned int ttwu_wake_remote;
807	unsigned int ttwu_move_affine;
808	unsigned int ttwu_move_balance;
809#endif
810};
811
812extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
813				    struct sched_domain_attr *dattr_new);
814extern int arch_reinit_sched_domains(void);
815
816#endif	/* CONFIG_SMP */
817
818/*
819 * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
820 * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
821 * task of nice 0 or enough lower priority tasks to bring up the
822 * weighted_cpuload to SCHED_LOAD_SCALE.
823 */
824static inline int above_background_load(void)
825{
826	unsigned long cpu;
827
828	for_each_online_cpu(cpu) {
829		if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
830			return 1;
831	}
832	return 0;
833}
834
835struct io_context;			/* See blkdev.h */
836#define NGROUPS_SMALL		32
837#define NGROUPS_PER_BLOCK	((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
838struct group_info {
839	int ngroups;
840	atomic_t usage;
841	gid_t small_block[NGROUPS_SMALL];
842	int nblocks;
843	gid_t *blocks[0];
844};
845
846/*
847 * get_group_info() must be called with the owning task locked (via task_lock())
848 * when task != current.  The reason is that the vast majority of callers are
849 * looking at current->group_info, which cannot be changed except by the
850 * current task.  Changing current->group_info requires the task lock, too.
851 */
852#define get_group_info(group_info) do { \
853	atomic_inc(&(group_info)->usage); \
854} while (0)
855
856#define put_group_info(group_info) do { \
857	if (atomic_dec_and_test(&(group_info)->usage)) \
858		groups_free(group_info); \
859} while (0)
860
861extern struct group_info *groups_alloc(int gidsetsize);
862extern void groups_free(struct group_info *group_info);
863extern int set_current_groups(struct group_info *group_info);
864extern int groups_search(struct group_info *group_info, gid_t grp);
865/* access the groups "array" with this macro */
866#define GROUP_AT(gi, i) \
867    ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
868
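/*
 * Illustrative sketch (not part of this header): a naive linear scan over a
 * group_info using GROUP_AT(), roughly what groups_search() provides (the
 * real helper is smarter).  Assumes the group_info has been pinned with
 * get_group_info().  Kept inside "#if 0" so it does not affect the build.
 */
#if 0
static int example_in_group_list(struct group_info *gi, gid_t grp)
{
	int i;

	for (i = 0; i < gi->ngroups; i++)
		if (GROUP_AT(gi, i) == grp)
			return 1;
	return 0;
}
#endif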
869#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
870extern void prefetch_stack(struct task_struct *t);
871#else
872static inline void prefetch_stack(struct task_struct *t) { }
873#endif
874
875struct audit_context;		/* See audit.c */
876struct mempolicy;
877struct pipe_inode_info;
878struct uts_namespace;
879
880struct rq;
881struct sched_domain;
882
883struct sched_class {
884	const struct sched_class *next;
885
886	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
887	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
888	void (*yield_task) (struct rq *rq);
889	int  (*select_task_rq)(struct task_struct *p, int sync);
890
891	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
892
893	struct task_struct * (*pick_next_task) (struct rq *rq);
894	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
895
896#ifdef CONFIG_SMP
897	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
898			struct rq *busiest, unsigned long max_load_move,
899			struct sched_domain *sd, enum cpu_idle_type idle,
900			int *all_pinned, int *this_best_prio);
901
902	int (*move_one_task) (struct rq *this_rq, int this_cpu,
903			      struct rq *busiest, struct sched_domain *sd,
904			      enum cpu_idle_type idle);
905	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
906	void (*post_schedule) (struct rq *this_rq);
907	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
908#endif
909
910	void (*set_curr_task) (struct rq *rq);
911	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
912	void (*task_new) (struct rq *rq, struct task_struct *p);
913	void (*set_cpus_allowed)(struct task_struct *p,
914				 const cpumask_t *newmask);
915
916	void (*join_domain)(struct rq *rq);
917	void (*leave_domain)(struct rq *rq);
918
919	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
920			       int running);
921	void (*switched_to) (struct rq *this_rq, struct task_struct *task,
922			     int running);
923	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
924			     int oldprio, int running);
925
926#ifdef CONFIG_FAIR_GROUP_SCHED
927	void (*moved_group) (struct task_struct *p);
928#endif
929};
930
931struct load_weight {
932	unsigned long weight, inv_weight;
933};
934
935/*
936 * CFS stats for a schedulable entity (task, task-group etc)
937 *
938 * Current field usage histogram:
939 *
940 *     4 se->block_start
941 *     4 se->run_node
942 *     4 se->sleep_start
943 *     6 se->load.weight
944 */
945struct sched_entity {
946	struct load_weight	load;		/* for load-balancing */
947	struct rb_node		run_node;
948	unsigned int		on_rq;
949
950	u64			exec_start;
951	u64			sum_exec_runtime;
952	u64			vruntime;
953	u64			prev_sum_exec_runtime;
954
955	u64			last_wakeup;
956	u64			avg_overlap;
957
958#ifdef CONFIG_SCHEDSTATS
959	u64			wait_start;
960	u64			wait_max;
961	u64			wait_count;
962	u64			wait_sum;
963
964	u64			sleep_start;
965	u64			sleep_max;
966	s64			sum_sleep_runtime;
967
968	u64			block_start;
969	u64			block_max;
970	u64			exec_max;
971	u64			slice_max;
972
973	u64			nr_migrations;
974	u64			nr_migrations_cold;
975	u64			nr_failed_migrations_affine;
976	u64			nr_failed_migrations_running;
977	u64			nr_failed_migrations_hot;
978	u64			nr_forced_migrations;
979	u64			nr_forced2_migrations;
980
981	u64			nr_wakeups;
982	u64			nr_wakeups_sync;
983	u64			nr_wakeups_migrate;
984	u64			nr_wakeups_local;
985	u64			nr_wakeups_remote;
986	u64			nr_wakeups_affine;
987	u64			nr_wakeups_affine_attempts;
988	u64			nr_wakeups_passive;
989	u64			nr_wakeups_idle;
990#endif
991
992#ifdef CONFIG_FAIR_GROUP_SCHED
993	struct sched_entity	*parent;
994	/* rq on which this entity is (to be) queued: */
995	struct cfs_rq		*cfs_rq;
996	/* rq "owned" by this entity/group: */
997	struct cfs_rq		*my_q;
998#endif
999};
1000
1001struct sched_rt_entity {
1002	struct list_head run_list;
1003	unsigned int time_slice;
1004	unsigned long timeout;
1005	int nr_cpus_allowed;
1006
1007#ifdef CONFIG_RT_GROUP_SCHED
1008	struct sched_rt_entity	*parent;
1009	/* rq on which this entity is (to be) queued: */
1010	struct rt_rq		*rt_rq;
1011	/* rq "owned" by this entity/group: */
1012	struct rt_rq		*my_q;
1013#endif
1014};
1015
1016struct task_struct {
1017	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1018	void *stack;
1019	atomic_t usage;
1020	unsigned int flags;	/* per process flags, defined below */
1021	unsigned int ptrace;
1022
1023	int lock_depth;		/* BKL lock depth */
1024
1025#ifdef CONFIG_SMP
1026#ifdef __ARCH_WANT_UNLOCKED_CTXSW
1027	int oncpu;
1028#endif
1029#endif
1030
1031	int prio, static_prio, normal_prio;
1032	const struct sched_class *sched_class;
1033	struct sched_entity se;
1034	struct sched_rt_entity rt;
1035
1036#ifdef CONFIG_PREEMPT_NOTIFIERS
1037	/* list of struct preempt_notifier: */
1038	struct hlist_head preempt_notifiers;
1039#endif
1040
1041	/*
1042	 * fpu_counter contains the number of consecutive context switches
1043	 * during which the FPU is used. If this is over a threshold, the lazy FPU
1044	 * saving becomes unlazy to save the trap. This is an unsigned char
1045	 * so that after 256 times the counter wraps and the behavior turns
1046	 * lazy again; this is to deal with bursty apps that only use the FPU for
1047	 * a short time.
1048	 */
1049	unsigned char fpu_counter;
1050	s8 oomkilladj; /* OOM kill score adjustment (bit shift). */
1051#ifdef CONFIG_BLK_DEV_IO_TRACE
1052	unsigned int btrace_seq;
1053#endif
1054
1055	unsigned int policy;
1056	cpumask_t cpus_allowed;
1057
1058#ifdef CONFIG_PREEMPT_RCU
1059	int rcu_read_lock_nesting;
1060	int rcu_flipctr_idx;
1061#endif /* #ifdef CONFIG_PREEMPT_RCU */
1062
1063#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1064	struct sched_info sched_info;
1065#endif
1066
1067	struct list_head tasks;
1068	/*
1069	 * ptrace_list/ptrace_children forms the list of my children
1070	 * that were stolen by a ptracer.
1071	 */
1072	struct list_head ptrace_children;
1073	struct list_head ptrace_list;
1074
1075	struct mm_struct *mm, *active_mm;
1076
1077/* task state */
1078	struct linux_binfmt *binfmt;
1079	int exit_state;
1080	int exit_code, exit_signal;
1081	int pdeath_signal;  /*  The signal sent when the parent dies  */
1082	/* ??? */
1083	unsigned int personality;
1084	unsigned did_exec:1;
1085	pid_t pid;
1086	pid_t tgid;
1087
1088#ifdef CONFIG_CC_STACKPROTECTOR
1089	/* Canary value for the -fstack-protector gcc feature */
1090	unsigned long stack_canary;
1091#endif
1092	/*
1093	 * pointers to (original) parent process, youngest child, younger sibling,
1094	 * older sibling, respectively.  (p->father can be replaced with
1095	 * p->parent->pid)
1096	 */
1097	struct task_struct *real_parent; /* real parent process (when being debugged) */
1098	struct task_struct *parent;	/* parent process */
1099	/*
1100	 * children/sibling forms the list of my children plus the
1101	 * tasks I'm ptracing.
1102	 */
1103	struct list_head children;	/* list of my children */
1104	struct list_head sibling;	/* linkage in my parent's children list */
1105	struct task_struct *group_leader;	/* threadgroup leader */
1106
1107	/* PID/PID hash table linkage. */
1108	struct pid_link pids[PIDTYPE_MAX];
1109	struct list_head thread_group;
1110
1111	struct completion *vfork_done;		/* for vfork() */
1112	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
1113	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
1114
1115	unsigned int rt_priority;
1116	cputime_t utime, stime, utimescaled, stimescaled;
1117	cputime_t gtime;
1118	cputime_t prev_utime, prev_stime;
1119	unsigned long nvcsw, nivcsw; /* context switch counts */
1120	struct timespec start_time; 		/* monotonic time */
1121	struct timespec real_start_time;	/* boot based time */
1122/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1123	unsigned long min_flt, maj_flt;
1124
1125  	cputime_t it_prof_expires, it_virt_expires;
1126	unsigned long long it_sched_expires;
1127	struct list_head cpu_timers[3];
1128
1129/* process credentials */
1130	uid_t uid,euid,suid,fsuid;
1131	gid_t gid,egid,sgid,fsgid;
1132	struct group_info *group_info;
1133	kernel_cap_t   cap_effective, cap_inheritable, cap_permitted, cap_bset;
1134	unsigned keep_capabilities:1;
1135	struct user_struct *user;
1136#ifdef CONFIG_KEYS
1137	struct key *request_key_auth;	/* assumed request_key authority */
1138	struct key *thread_keyring;	/* keyring private to this thread */
1139	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
1140#endif
1141	char comm[TASK_COMM_LEN]; /* executable name excluding path
1142				     - access with [gs]et_task_comm (which lock
1143				       it with task_lock())
1144				     - initialized normally by flush_old_exec */
1145/* file system info */
1146	int link_count, total_link_count;
1147#ifdef CONFIG_SYSVIPC
1148/* ipc stuff */
1149	struct sysv_sem sysvsem;
1150#endif
1151#ifdef CONFIG_DETECT_SOFTLOCKUP
1152/* hung task detection */
1153	unsigned long last_switch_timestamp;
1154	unsigned long last_switch_count;
1155#endif
1156/* CPU-specific state of this task */
1157	struct thread_struct thread;
1158/* filesystem information */
1159	struct fs_struct *fs;
1160/* open file information */
1161	struct files_struct *files;
1162/* namespaces */
1163	struct nsproxy *nsproxy;
1164/* signal handlers */
1165	struct signal_struct *signal;
1166	struct sighand_struct *sighand;
1167
1168	sigset_t blocked, real_blocked;
1169	sigset_t saved_sigmask;		/* To be restored with TIF_RESTORE_SIGMASK */
1170	struct sigpending pending;
1171
1172	unsigned long sas_ss_sp;
1173	size_t sas_ss_size;
1174	int (*notifier)(void *priv);
1175	void *notifier_data;
1176	sigset_t *notifier_mask;
1177#ifdef CONFIG_SECURITY
1178	void *security;
1179#endif
1180	struct audit_context *audit_context;
1181#ifdef CONFIG_AUDITSYSCALL
1182	uid_t loginuid;
1183	unsigned int sessionid;
1184#endif
1185	seccomp_t seccomp;
1186
1187/* Thread group tracking */
1188   	u32 parent_exec_id;
1189   	u32 self_exec_id;
1190/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
1191	spinlock_t alloc_lock;
1192
1193	/* Protection of the PI data structures: */
1194	spinlock_t pi_lock;
1195
1196#ifdef CONFIG_RT_MUTEXES
1197	/* PI waiters blocked on a rt_mutex held by this task */
1198	struct plist_head pi_waiters;
1199	/* Deadlock detection and priority inheritance handling */
1200	struct rt_mutex_waiter *pi_blocked_on;
1201#endif
1202
1203#ifdef CONFIG_DEBUG_MUTEXES
1204	/* mutex deadlock detection */
1205	struct mutex_waiter *blocked_on;
1206#endif
1207#ifdef CONFIG_TRACE_IRQFLAGS
1208	unsigned int irq_events;
1209	int hardirqs_enabled;
1210	unsigned long hardirq_enable_ip;
1211	unsigned int hardirq_enable_event;
1212	unsigned long hardirq_disable_ip;
1213	unsigned int hardirq_disable_event;
1214	int softirqs_enabled;
1215	unsigned long softirq_disable_ip;
1216	unsigned int softirq_disable_event;
1217	unsigned long softirq_enable_ip;
1218	unsigned int softirq_enable_event;
1219	int hardirq_context;
1220	int softirq_context;
1221#endif
1222#ifdef CONFIG_LOCKDEP
1223# define MAX_LOCK_DEPTH 48UL
1224	u64 curr_chain_key;
1225	int lockdep_depth;
1226	struct held_lock held_locks[MAX_LOCK_DEPTH];
1227	unsigned int lockdep_recursion;
1228#endif
1229
1230/* journalling filesystem info */
1231	void *journal_info;
1232
1233/* stacked block device info */
1234	struct bio *bio_list, **bio_tail;
1235
1236/* VM state */
1237	struct reclaim_state *reclaim_state;
1238
1239	struct backing_dev_info *backing_dev_info;
1240
1241	struct io_context *io_context;
1242
1243	unsigned long ptrace_message;
1244	siginfo_t *last_siginfo; /* For ptrace use.  */
1245#ifdef CONFIG_TASK_XACCT
1246/* i/o counters (bytes read/written, #syscalls) */
1247	u64 rchar, wchar, syscr, syscw;
1248#endif
1249	struct task_io_accounting ioac;
1250#if defined(CONFIG_TASK_XACCT)
1251	u64 acct_rss_mem1;	/* accumulated rss usage */
1252	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
1253	cputime_t acct_stimexpd;/* stime since last update */
1254#endif
1255#ifdef CONFIG_NUMA
1256  	struct mempolicy *mempolicy;
1257	short il_next;
1258#endif
1259#ifdef CONFIG_CPUSETS
1260	nodemask_t mems_allowed;
1261	int cpuset_mems_generation;
1262	int cpuset_mem_spread_rotor;
1263#endif
1264#ifdef CONFIG_CGROUPS
1265	/* Control Group info protected by css_set_lock */
1266	struct css_set *cgroups;
1267	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1268	struct list_head cg_list;
1269#endif
1270#ifdef CONFIG_FUTEX
1271	struct robust_list_head __user *robust_list;
1272#ifdef CONFIG_COMPAT
1273	struct compat_robust_list_head __user *compat_robust_list;
1274#endif
1275	struct list_head pi_state_list;
1276	struct futex_pi_state *pi_state_cache;
1277#endif
1278	atomic_t fs_excl;	/* holding fs exclusive resources */
1279	struct rcu_head rcu;
1280
1281	/*
1282	 * cache last used pipe for splice
1283	 */
1284	struct pipe_inode_info *splice_pipe;
1285#ifdef	CONFIG_TASK_DELAY_ACCT
1286	struct task_delay_info *delays;
1287#endif
1288#ifdef CONFIG_FAULT_INJECTION
1289	int make_it_fail;
1290#endif
1291	struct prop_local_single dirties;
1292#ifdef CONFIG_LATENCYTOP
1293	int latency_record_count;
1294	struct latency_record latency_record[LT_SAVECOUNT];
1295#endif
1296};
1297
1298/*
1299 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
1300 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
1301 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
1302 * values are inverted: lower p->prio value means higher priority.
1303 *
1304 * The MAX_USER_RT_PRIO value allows the actual maximum
1305 * RT priority to be separate from the value exported to
1306 * user-space.  This allows kernel threads to set their
1307 * priority to a value higher than any user task. Note:
1308 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
1309 */
1310
1311#define MAX_USER_RT_PRIO	100
1312#define MAX_RT_PRIO		MAX_USER_RT_PRIO
1313
1314#define MAX_PRIO		(MAX_RT_PRIO + 40)
1315#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
1316
1317static inline int rt_prio(int prio)
1318{
1319	if (unlikely(prio < MAX_RT_PRIO))
1320		return 1;
1321	return 0;
1322}
1323
1324static inline int rt_task(struct task_struct *p)
1325{
1326	return rt_prio(p->prio);
1327}
1328
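/*
 * Illustrative sketch (not part of this header): with the values above,
 * MAX_RT_PRIO is 100, DEFAULT_PRIO is 120 (nice 0) and MAX_PRIO is 140, so
 * the nice range -20..+19 occupies prio 100..139 while 0..99 is reserved for
 * realtime tasks.  Kept inside "#if 0" so it does not affect the build.
 */
#if 0
static inline int example_is_nice_level(int prio)
{
	/* true for SCHED_NORMAL/SCHED_BATCH priorities, false for RT ones */
	return prio >= MAX_RT_PRIO && prio < MAX_PRIO;
}
#endif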
1329static inline void set_task_session(struct task_struct *tsk, pid_t session)
1330{
1331	tsk->signal->__session = session;
1332}
1333
1334static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
1335{
1336	tsk->signal->__pgrp = pgrp;
1337}
1338
1339static inline struct pid *task_pid(struct task_struct *task)
1340{
1341	return task->pids[PIDTYPE_PID].pid;
1342}
1343
1344static inline struct pid *task_tgid(struct task_struct *task)
1345{
1346	return task->group_leader->pids[PIDTYPE_PID].pid;
1347}
1348
1349static inline struct pid *task_pgrp(struct task_struct *task)
1350{
1351	return task->group_leader->pids[PIDTYPE_PGID].pid;
1352}
1353
1354static inline struct pid *task_session(struct task_struct *task)
1355{
1356	return task->group_leader->pids[PIDTYPE_SID].pid;
1357}
1358
1359struct pid_namespace;
1360
1361/*
1362 * the helpers to get the task's different pids as they are seen
1363 * from various namespaces
1364 *
1365 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1366 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1367 *                     current.
1368 * task_xid_nr_ns()  : id seen from the ns specified;
1369 *
1370 * set_task_vxid()   : assigns a virtual id to a task;
1371 *
1372 * see also pid_nr() etc in include/linux/pid.h
1373 */
1374
1375static inline pid_t task_pid_nr(struct task_struct *tsk)
1376{
1377	return tsk->pid;
1378}
1379
1380pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1381
1382static inline pid_t task_pid_vnr(struct task_struct *tsk)
1383{
1384	return pid_vnr(task_pid(tsk));
1385}
1386
1387
1388static inline pid_t task_tgid_nr(struct task_struct *tsk)
1389{
1390	return tsk->tgid;
1391}
1392
1393pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1394
1395static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1396{
1397	return pid_vnr(task_tgid(tsk));
1398}
1399
1400
1401static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1402{
1403	return tsk->signal->__pgrp;
1404}
1405
1406pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1407
1408static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1409{
1410	return pid_vnr(task_pgrp(tsk));
1411}
1412
1413
1414static inline pid_t task_session_nr(struct task_struct *tsk)
1415{
1416	return tsk->signal->__session;
1417}
1418
1419pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1420
1421static inline pid_t task_session_vnr(struct task_struct *tsk)
1422{
1423	return pid_vnr(task_session(tsk));
1424}
1425
1426
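/*
 * Illustrative sketch (not part of this header): the same task seen globally
 * and from the pid namespace of "current".  Inside a pid namespace the two
 * values usually differ.  Kept inside "#if 0" so it does not affect the
 * build.
 */
#if 0
	pid_t global_pid  = task_pid_nr(tsk);	/* id in the init namespace */
	pid_t virtual_pid = task_pid_vnr(tsk);	/* id as current sees it */

	printk(KERN_DEBUG "%s: global pid %d, virtual pid %d\n",
	       tsk->comm, global_pid, virtual_pid);
#endif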
1427/**
1428 * pid_alive - check that a task structure is not stale
1429 * @p: Task structure to be checked.
1430 *
1431 * Test if a process is not yet dead (at most zombie state).
1432 * If pid_alive fails, then pointers within the task structure
1433 * can be stale and must not be dereferenced.
1434 */
1435static inline int pid_alive(struct task_struct *p)
1436{
1437	return p->pids[PIDTYPE_PID].pid != NULL;
1438}
1439
1440/**
1441 * is_global_init - check if a task structure is init
1442 * @tsk: Task structure to be checked.
1443 *
1444 * Check if a task structure is the first user space task the kernel created.
1445 */
1446static inline int is_global_init(struct task_struct *tsk)
1447{
1448	return tsk->pid == 1;
1449}
1450
1451/*
1452 * is_container_init:
1453 * check whether the task is the init process of its own pid namespace.
1454 */
1455extern int is_container_init(struct task_struct *tsk);
1456
1457extern struct pid *cad_pid;
1458
1459extern void free_task(struct task_struct *tsk);
1460#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1461
1462extern void __put_task_struct(struct task_struct *t);
1463
1464static inline void put_task_struct(struct task_struct *t)
1465{
1466	if (atomic_dec_and_test(&t->usage))
1467		__put_task_struct(t);
1468}
1469
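/*
 * Illustrative sketch (not part of this header): the usual reference pattern
 * around a task pointer that is used after the locked section that found it
 * has been left.  Kept inside "#if 0" so it does not affect the build.
 */
#if 0
	get_task_struct(p);		/* pin p beyond the locked lookup */
	/* ... use p without tasklist_lock/RCU held ... */
	put_task_struct(p);		/* may free p on the last reference */
#endif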
1470/*
1471 * Per process flags
1472 */
1473#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
1474					/* Not implemented yet, only for 486 */
1475#define PF_STARTING	0x00000002	/* being created */
1476#define PF_EXITING	0x00000004	/* getting shut down */
1477#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
1478#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
1479#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
1480#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
1481#define PF_DUMPCORE	0x00000200	/* dumped core */
1482#define PF_SIGNALED	0x00000400	/* killed by a signal */
1483#define PF_MEMALLOC	0x00000800	/* Allocating memory */
1484#define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
1485#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
1486#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
1487#define PF_FROZEN	0x00010000	/* frozen for system suspend */
1488#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
1489#define PF_KSWAPD	0x00040000	/* I am kswapd */
1490#define PF_SWAPOFF	0x00080000	/* I am in swapoff */
1491#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1492#define PF_BORROWED_MM	0x00200000	/* I am a kthread doing use_mm */
1493#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1494#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1495#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
1496#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
1497#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
1498#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
1499#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
1500
1501/*
1502 * Only the _current_ task can read/write to tsk->flags, but other
1503 * tasks can access tsk->flags in readonly mode for example
1504 * with tsk_used_math (like during threaded core dumping).
1505 * There is however an exception to this rule during ptrace
1506 * or during fork: the ptracer task is allowed to write to the
1507 * child->flags of its traced child (same goes for fork, the parent
1508 * can write to the child->flags), because we're guaranteed the
1509 * child is not running and in turn not changing child->flags
1510 * at the same time the parent does it.
1511 */
1512#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1513#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1514#define clear_used_math() clear_stopped_child_used_math(current)
1515#define set_used_math() set_stopped_child_used_math(current)
1516#define conditional_stopped_child_used_math(condition, child) \
1517	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1518#define conditional_used_math(condition) \
1519	conditional_stopped_child_used_math(condition, current)
1520#define copy_to_stopped_child_used_math(child) \
1521	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1522/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1523#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1524#define used_math() tsk_used_math(current)
1525
1526#ifdef CONFIG_SMP
1527extern int set_cpus_allowed_ptr(struct task_struct *p,
1528				const cpumask_t *new_mask);
1529#else
1530static inline int set_cpus_allowed_ptr(struct task_struct *p,
1531				       const cpumask_t *new_mask)
1532{
1533	if (!cpu_isset(0, *new_mask))
1534		return -EINVAL;
1535	return 0;
1536}
1537#endif
1538static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1539{
1540	return set_cpus_allowed_ptr(p, &new_mask);
1541}
1542
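/*
 * Illustrative sketch (not part of this header): pinning a task to a single
 * CPU with the helper above; cpus_clear()/cpu_set() come from
 * <linux/cpumask.h>.  Kept inside "#if 0" so it does not affect the build.
 */
#if 0
	cpumask_t mask;

	cpus_clear(mask);
	cpu_set(cpu, mask);
	if (set_cpus_allowed_ptr(p, &mask))
		printk(KERN_WARNING "could not pin %s to CPU %d\n",
		       p->comm, cpu);
#endif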
1543extern unsigned long long sched_clock(void);
1544
1545/*
1546 * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
1547 * clock constructed from sched_clock():
1548 */
1549extern unsigned long long cpu_clock(int cpu);
1550
1551extern unsigned long long
1552task_sched_runtime(struct task_struct *task);
1553
1554/* sched_exec is called by processes performing an exec */
1555#ifdef CONFIG_SMP
1556extern void sched_exec(void);
1557#else
1558#define sched_exec()   {}
1559#endif
1560
1561extern void sched_clock_idle_sleep_event(void);
1562extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1563
1564#ifdef CONFIG_HOTPLUG_CPU
1565extern void idle_task_exit(void);
1566#else
1567static inline void idle_task_exit(void) {}
1568#endif
1569
1570extern void sched_idle_next(void);
1571
1572#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
1573extern void wake_up_idle_cpu(int cpu);
1574#else
1575static inline void wake_up_idle_cpu(int cpu) { }
1576#endif
1577
1578#ifdef CONFIG_SCHED_DEBUG
1579extern unsigned int sysctl_sched_latency;
1580extern unsigned int sysctl_sched_min_granularity;
1581extern unsigned int sysctl_sched_wakeup_granularity;
1582extern unsigned int sysctl_sched_child_runs_first;
1583extern unsigned int sysctl_sched_features;
1584extern unsigned int sysctl_sched_migration_cost;
1585extern unsigned int sysctl_sched_nr_migrate;
1586
1587int sched_nr_latency_handler(struct ctl_table *table, int write,
1588		struct file *file, void __user *buffer, size_t *length,
1589		loff_t *ppos);
1590#endif
1591extern unsigned int sysctl_sched_rt_period;
1592extern int sysctl_sched_rt_runtime;
1593
1594int sched_rt_handler(struct ctl_table *table, int write,
1595		struct file *filp, void __user *buffer, size_t *lenp,
1596		loff_t *ppos);
1597
1598extern unsigned int sysctl_sched_compat_yield;
1599
1600#ifdef CONFIG_RT_MUTEXES
1601extern int rt_mutex_getprio(struct task_struct *p);
1602extern void rt_mutex_setprio(struct task_struct *p, int prio);
1603extern void rt_mutex_adjust_pi(struct task_struct *p);
1604#else
1605static inline int rt_mutex_getprio(struct task_struct *p)
1606{
1607	return p->normal_prio;
1608}
1609# define rt_mutex_adjust_pi(p)		do { } while (0)
1610#endif
1611
1612extern void set_user_nice(struct task_struct *p, long nice);
1613extern int task_prio(const struct task_struct *p);
1614extern int task_nice(const struct task_struct *p);
1615extern int can_nice(const struct task_struct *p, const int nice);
1616extern int task_curr(const struct task_struct *p);
1617extern int idle_cpu(int cpu);
1618extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
1619extern struct task_struct *idle_task(int cpu);
1620extern struct task_struct *curr_task(int cpu);
1621extern void set_curr_task(int cpu, struct task_struct *p);
1622
1623void yield(void);
1624
1625/*
1626 * The default (Linux) execution domain.
1627 */
1628extern struct exec_domain	default_exec_domain;
1629
1630union thread_union {
1631	struct thread_info thread_info;
1632	unsigned long stack[THREAD_SIZE/sizeof(long)];
1633};
1634
1635#ifndef __HAVE_ARCH_KSTACK_END
1636static inline int kstack_end(void *addr)
1637{
1638	/* Reliable end of stack detection:
1639	 * Some APM bios versions misalign the stack
1640	 * Some APM BIOS versions misalign the stack
1641	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
1642}
1643#endif
1644
1645extern union thread_union init_thread_union;
1646extern struct task_struct init_task;
1647
1648extern struct   mm_struct init_mm;
1649
1650extern struct pid_namespace init_pid_ns;
1651
1652/*
1653 * find a task by one of its numerical ids
1654 *
1655 * find_task_by_pid_type_ns():
1656 *      it is the most generic call - it finds a task given the id,
1657 *      the id type and the namespace
1658 * find_task_by_pid_ns():
1659 *      finds a task by its pid in the specified namespace
1660 * find_task_by_vpid():
1661 *      finds a task by its virtual pid
1662 * find_task_by_pid():
1663 *      finds a task by its global pid
1664 *
1665 * see also find_pid() etc in include/linux/pid.h
1666 */
1667
1668extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
1669		struct pid_namespace *ns);
1670
1671extern struct task_struct *find_task_by_pid(pid_t nr);
1672extern struct task_struct *find_task_by_vpid(pid_t nr);
1673extern struct task_struct *find_task_by_pid_ns(pid_t nr,
1674		struct pid_namespace *ns);
1675
1676extern void __set_special_pids(struct pid *pid);
1677
1678/* per-UID process charging. */
1679extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
1680static inline struct user_struct *get_uid(struct user_struct *u)
1681{
1682	atomic_inc(&u->__count);
1683	return u;
1684}
1685extern void free_uid(struct user_struct *);
1686extern void switch_uid(struct user_struct *);
1687extern void release_uids(struct user_namespace *ns);
1688
1689#include <asm/current.h>
1690
1691extern void do_timer(unsigned long ticks);
1692
1693extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1694extern int wake_up_process(struct task_struct *tsk);
1695extern void wake_up_new_task(struct task_struct *tsk,
1696				unsigned long clone_flags);
1697#ifdef CONFIG_SMP
1698 extern void kick_process(struct task_struct *tsk);
1699#else
1700 static inline void kick_process(struct task_struct *tsk) { }
1701#endif
1702extern void sched_fork(struct task_struct *p, int clone_flags);
1703extern void sched_dead(struct task_struct *p);
1704
1705extern int in_group_p(gid_t);
1706extern int in_egroup_p(gid_t);
1707
1708extern void proc_caches_init(void);
1709extern void flush_signals(struct task_struct *);
1710extern void ignore_signals(struct task_struct *);
1711extern void flush_signal_handlers(struct task_struct *, int force_default);
1712extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
1713
1714static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
1715{
1716	unsigned long flags;
1717	int ret;
1718
1719	spin_lock_irqsave(&tsk->sighand->siglock, flags);
1720	ret = dequeue_signal(tsk, mask, info);
1721	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
1722
1723	return ret;
1724}
1725
1726extern void block_all_signals(int (*notifier)(void *priv), void *priv,
1727			      sigset_t *mask);
1728extern void unblock_all_signals(void);
1729extern void release_task(struct task_struct * p);
1730extern int send_sig_info(int, struct siginfo *, struct task_struct *);
1731extern int force_sigsegv(int, struct task_struct *);
1732extern int force_sig_info(int, struct siginfo *, struct task_struct *);
1733extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
1734extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
1735extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
1736extern int kill_pgrp(struct pid *pid, int sig, int priv);
1737extern int kill_pid(struct pid *pid, int sig, int priv);
1738extern int kill_proc_info(int, struct siginfo *, pid_t);
1739extern void do_notify_parent(struct task_struct *, int);
1740extern void force_sig(int, struct task_struct *);
1741extern void force_sig_specific(int, struct task_struct *);
1742extern int send_sig(int, struct task_struct *, int);
1743extern void zap_other_threads(struct task_struct *p);
1744extern int kill_proc(pid_t, int, int);
1745extern struct sigqueue *sigqueue_alloc(void);
1746extern void sigqueue_free(struct sigqueue *);
1747extern int send_sigqueue(int, struct sigqueue *,  struct task_struct *);
1748extern int send_group_sigqueue(int, struct sigqueue *,  struct task_struct *);
1749extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
1750extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
1751
1752static inline int kill_cad_pid(int sig, int priv)
1753{
1754	return kill_pid(cad_pid, sig, priv);
1755}
1756
1757/* These can be the second arg to send_sig_info/send_group_sig_info.  */
1758#define SEND_SIG_NOINFO ((struct siginfo *) 0)
1759#define SEND_SIG_PRIV	((struct siginfo *) 1)
1760#define SEND_SIG_FORCED	((struct siginfo *) 2)
1761
1762static inline int is_si_special(const struct siginfo *info)
1763{
1764	return info <= SEND_SIG_FORCED;
1765}
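
/*
 * Example (illustrative sketch): sending a signal from kernel context
 * without allocating a siginfo, using one of the markers above; the
 * delivery code uses is_si_special() to tell them apart from a real
 * siginfo pointer.
 *
 *	send_sig_info(SIGKILL, SEND_SIG_FORCED, p);
 */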
1766
1767/* True if we are on the alternate signal stack.  */
1768
1769static inline int on_sig_stack(unsigned long sp)
1770{
1771	return (sp - current->sas_ss_sp < current->sas_ss_size);
1772}
1773
1774static inline int sas_ss_flags(unsigned long sp)
1775{
1776	return (current->sas_ss_size == 0 ? SS_DISABLE
1777		: on_sig_stack(sp) ? SS_ONSTACK : 0);
1778}
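
/*
 * Example (illustrative sketch; 'ka' and 'sp' are assumed to come from
 * the arch signal-delivery path): deciding whether a handler frame
 * should be built on the alternate stack.
 *
 *	if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(sp) == 0)
 *		sp = current->sas_ss_sp + current->sas_ss_size;
 */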
1779
1780/*
1781 * Routines for handling mm_structs
1782 */
1783extern struct mm_struct * mm_alloc(void);
1784
1785/* mmdrop drops the mm and the page tables */
1786extern void __mmdrop(struct mm_struct *);
1787static inline void mmdrop(struct mm_struct * mm)
1788{
1789	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
1790		__mmdrop(mm);
1791}
1792
1793/* mmput gets rid of the mappings and all user-space */
1794extern void mmput(struct mm_struct *);
1795/* Grab a reference to a task's mm, if it is not already going away */
1796extern struct mm_struct *get_task_mm(struct task_struct *task);
1797/* Remove the current task's stale references to the old mm_struct */
1798extern void mm_release(struct task_struct *, struct mm_struct *);
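
/*
 * Example (illustrative sketch): safely inspecting another task's
 * address space.  get_task_mm() returns NULL for kernel threads and for
 * tasks whose mm is already going away; the reference must be dropped
 * with mmput().
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		down_read(&mm->mmap_sem);
 *		... walk mm->mmap ...
 *		up_read(&mm->mmap_sem);
 *		mmput(mm);
 *	}
 */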
1799
1800extern int  copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
1801extern void flush_thread(void);
1802extern void exit_thread(void);
1803
1804extern void exit_files(struct task_struct *);
1805extern void __cleanup_signal(struct signal_struct *);
1806extern void __cleanup_sighand(struct sighand_struct *);
1807extern void exit_itimers(struct signal_struct *);
1808
1809extern NORET_TYPE void do_group_exit(int);
1810
1811extern void daemonize(const char *, ...);
1812extern int allow_signal(int);
1813extern int disallow_signal(int);
1814
1815extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
1816extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
1817struct task_struct *fork_idle(int);
1818
1819extern void set_task_comm(struct task_struct *tsk, char *from);
1820extern char *get_task_comm(char *to, struct task_struct *tsk);
1821
1822#ifdef CONFIG_SMP
1823extern void wait_task_inactive(struct task_struct * p);
1824#else
1825#define wait_task_inactive(p)	do { } while (0)
1826#endif
1827
1828#define remove_parent(p)	list_del_init(&(p)->sibling)
1829#define add_parent(p)		list_add_tail(&(p)->sibling,&(p)->parent->children)
1830
1831#define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
1832
1833#define for_each_process(p) \
1834	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
1835
1836/*
1837 * Careful: do_each_thread/while_each_thread is a double loop so
1838 *          'break' will not work as expected - use goto instead.
1839 */
1840#define do_each_thread(g, t) \
1841	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
1842
1843#define while_each_thread(g, t) \
1844	while ((t = next_thread(t)) != g)
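
/*
 * Example (illustrative sketch; interesting() is a made-up predicate):
 * iterating over every thread of every process with tasklist_lock held.
 * As noted above, 'break' only leaves the inner loop, hence the goto.
 *
 *	struct task_struct *g, *t;
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		if (interesting(t))
 *			goto out;
 *	} while_each_thread(g, t);
 * out:
 *	read_unlock(&tasklist_lock);
 */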
1845
1846/* de_thread depends on thread_group_leader not being a pid-based check */
1847#define thread_group_leader(p)	(p == p->group_leader)
1848
1849/* Due to the insanities of de_thread it is possible for a process
1850 * to have the pid of the thread group leader without actually being
1851 * the thread group leader.  For iteration through the pids in proc
1852 * all we care about is that we have a task with the appropriate
1853 * pid; we don't actually care if we have the right task.
1854 */
1855static inline int has_group_leader_pid(struct task_struct *p)
1856{
1857	return p->pid == p->tgid;
1858}
1859
1860static inline
1861int same_thread_group(struct task_struct *p1, struct task_struct *p2)
1862{
1863	return p1->tgid == p2->tgid;
1864}
1865
1866static inline struct task_struct *next_thread(const struct task_struct *p)
1867{
1868	return list_entry(rcu_dereference(p->thread_group.next),
1869			  struct task_struct, thread_group);
1870}
1871
1872static inline int thread_group_empty(struct task_struct *p)
1873{
1874	return list_empty(&p->thread_group);
1875}
1876
1877#define delay_group_leader(p) \
1878		(thread_group_leader(p) && !thread_group_empty(p))
1879
1880/*
1881 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
1882 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
1883 * pins the final release of task.io_context.  Also protects ->cpuset and
1884 * ->cgroup.subsys[].
1885 *
1886 * Nests both inside and outside of read_lock(&tasklist_lock).
1887 * It must not be nested with write_lock_irq(&tasklist_lock),
1888 * neither inside nor outside.
1889 */
1890static inline void task_lock(struct task_struct *p)
1891{
1892	spin_lock(&p->alloc_lock);
1893}
1894
1895static inline void task_unlock(struct task_struct *p)
1896{
1897	spin_unlock(&p->alloc_lock);
1898}
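
/*
 * Example (illustrative sketch): task_lock() is what makes it safe to
 * dereference the pointers listed above for a task other than current,
 * e.g. to look at another task's mm pointer:
 *
 *	task_lock(p);
 *	if (p->mm)
 *		... p->mm cannot be cleared until task_unlock(p) ...
 *	task_unlock(p);
 */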
1899
1900extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
1901							unsigned long *flags);
1902
1903static inline void unlock_task_sighand(struct task_struct *tsk,
1904						unsigned long *flags)
1905{
1906	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
1907}
1908
1909#ifndef __HAVE_THREAD_FUNCTIONS
1910
1911#define task_thread_info(task)	((struct thread_info *)(task)->stack)
1912#define task_stack_page(task)	((task)->stack)
1913
1914static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
1915{
1916	*task_thread_info(p) = *task_thread_info(org);
1917	task_thread_info(p)->task = p;
1918}
1919
1920static inline unsigned long *end_of_stack(struct task_struct *p)
1921{
1922	return (unsigned long *)(task_thread_info(p) + 1);
1923}
1924
1925#endif
1926
1927/* Set thread flags in other tasks' structures
1928 * - see asm/thread_info.h for TIF_xxxx flags available
1929 */
1930static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1931{
1932	set_ti_thread_flag(task_thread_info(tsk), flag);
1933}
1934
1935static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1936{
1937	clear_ti_thread_flag(task_thread_info(tsk), flag);
1938}
1939
1940static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1941{
1942	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1943}
1944
1945static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1946{
1947	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1948}
1949
1950static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1951{
1952	return test_ti_thread_flag(task_thread_info(tsk), flag);
1953}
1954
1955static inline void set_tsk_need_resched(struct task_struct *tsk)
1956{
1957	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1958}
1959
1960static inline void clear_tsk_need_resched(struct task_struct *tsk)
1961{
1962	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1963}
1964
1965static inline int signal_pending(struct task_struct *p)
1966{
1967	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
1968}
1969
1970extern int __fatal_signal_pending(struct task_struct *p);
1971
1972static inline int fatal_signal_pending(struct task_struct *p)
1973{
1974	return signal_pending(p) && __fatal_signal_pending(p);
1975}
1976
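/*
 * Example (illustrative sketch; 'condition' is a made-up predicate): an
 * interruptible wait loop that bails out when a signal is pending for
 * the current task.
 *
 *	while (!condition) {
 *		if (signal_pending(current))
 *			return -ERESTARTSYS;
 *		schedule_timeout_interruptible(HZ);
 *	}
 */
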
1977static inline int need_resched(void)
1978{
1979	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
1980}
1981
1982/*
1983 * cond_resched() and cond_resched_lock(): latency reduction via
1984 * explicit rescheduling in places that are safe. The return
1985 * value indicates whether a reschedule actually took place.
1986 * cond_resched_lock() will drop the spinlock before scheduling,
1987 * cond_resched_softirq() will enable bhs before scheduling.
1988 */
1989#ifdef CONFIG_PREEMPT
1990static inline int cond_resched(void)
1991{
1992	return 0;
1993}
1994#else
1995extern int _cond_resched(void);
1996static inline int cond_resched(void)
1997{
1998	return _cond_resched();
1999}
2000#endif
2001extern int cond_resched_lock(spinlock_t * lock);
2002extern int cond_resched_softirq(void);
2003
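/*
 * Example (illustrative sketch; 'obj', 'big_list' and process_one() are
 * made up): breaking up a long-running loop.  Under CONFIG_PREEMPT the
 * kernel preempts on its own and cond_resched() above is a no-op;
 * otherwise it reschedules when TIF_NEED_RESCHED is set.
 *
 *	list_for_each_entry(obj, &big_list, node) {
 *		process_one(obj);
 *		cond_resched();
 *	}
 *
 * With a spinlock held, cond_resched_lock(&lock) drops and retakes the
 * lock around the reschedule instead.
 */
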
2004/*
2005 * Does a critical section need to be broken due to another
2006 * task waiting?  (Technically this does not depend on CONFIG_PREEMPT,
2007 * but reflects a general need for low latency.)
2008 */
2009static inline int spin_needbreak(spinlock_t *lock)
2010{
2011#ifdef CONFIG_PREEMPT
2012	return spin_is_contended(lock);
2013#else
2014	return 0;
2015#endif
2016}
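
/*
 * Example (illustrative sketch; 'lock', 'list' and handle_one() are made
 * up, with 'lock' assumed to protect 'list'): a lock-holding loop that
 * backs off when the lock is contended or a reschedule is due.
 *
 *	spin_lock(&lock);
 *	while (!list_empty(&list)) {
 *		handle_one(&list);
 *		if (need_resched() || spin_needbreak(&lock))
 *			cond_resched_lock(&lock);
 *	}
 *	spin_unlock(&lock);
 */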
2017
2018/*
2019 * Reevaluate whether the task has signals pending delivery.
2020 * Wake the task if so.
2021 * This is required every time the blocked sigset_t changes.
2022 * callers must hold sighand->siglock.
2023 */
2024extern void recalc_sigpending_and_wake(struct task_struct *t);
2025extern void recalc_sigpending(void);
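
/*
 * Example (illustrative sketch; 'newset' is assumed to be a sigset_t
 * prepared by the caller): changing current->blocked and re-evaluating
 * pending signals, with the locking required above.
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	current->blocked = newset;
 *	recalc_sigpending();
 *	spin_unlock_irq(&current->sighand->siglock);
 */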
2026
2027extern void signal_wake_up(struct task_struct *t, int resume_stopped);
2028
2029/*
2030 * Wrappers for p->thread_info->cpu access. No-op on UP.
2031 */
2032#ifdef CONFIG_SMP
2033
2034static inline unsigned int task_cpu(const struct task_struct *p)
2035{
2036	return task_thread_info(p)->cpu;
2037}
2038
2039extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2040
2041#else
2042
2043static inline unsigned int task_cpu(const struct task_struct *p)
2044{
2045	return 0;
2046}
2047
2048static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2049{
2050}
2051
2052#endif /* CONFIG_SMP */
2053
2054#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
2055extern void arch_pick_mmap_layout(struct mm_struct *mm);
2056#else
2057static inline void arch_pick_mmap_layout(struct mm_struct *mm)
2058{
2059	mm->mmap_base = TASK_UNMAPPED_BASE;
2060	mm->get_unmapped_area = arch_get_unmapped_area;
2061	mm->unmap_area = arch_unmap_area;
2062}
2063#endif
2064
2065extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
2066extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
2067
2068extern int sched_mc_power_savings, sched_smt_power_savings;
2069
2070extern void normalize_rt_tasks(void);
2071
2072#ifdef CONFIG_GROUP_SCHED
2073
2074extern struct task_group init_task_group;
2075#ifdef CONFIG_USER_SCHED
2076extern struct task_group root_task_group;
2077#endif
2078
2079extern struct task_group *sched_create_group(struct task_group *parent);
2080extern void sched_destroy_group(struct task_group *tg);
2081extern void sched_move_task(struct task_struct *tsk);
2082#ifdef CONFIG_FAIR_GROUP_SCHED
2083extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
2084extern unsigned long sched_group_shares(struct task_group *tg);
2085#endif
2086#ifdef CONFIG_RT_GROUP_SCHED
2087extern int sched_group_set_rt_runtime(struct task_group *tg,
2088				      long rt_runtime_us);
2089extern long sched_group_rt_runtime(struct task_group *tg);
2090extern int sched_group_set_rt_period(struct task_group *tg,
2091				      long rt_period_us);
2092extern long sched_group_rt_period(struct task_group *tg);
2093#endif
2094#endif
2095
2096#ifdef CONFIG_TASK_XACCT
2097static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2098{
2099	tsk->rchar += amt;
2100}
2101
2102static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2103{
2104	tsk->wchar += amt;
2105}
2106
2107static inline void inc_syscr(struct task_struct *tsk)
2108{
2109	tsk->syscr++;
2110}
2111
2112static inline void inc_syscw(struct task_struct *tsk)
2113{
2114	tsk->syscw++;
2115}
2116#else
2117static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2118{
2119}
2120
2121static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2122{
2123}
2124
2125static inline void inc_syscr(struct task_struct *tsk)
2126{
2127}
2128
2129static inline void inc_syscw(struct task_struct *tsk)
2130{
2131}
2132#endif
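
/*
 * Example (illustrative sketch): the accounting hooks above are intended
 * to be called from the I/O paths, e.g. after a read that returned 'ret'
 * bytes:
 *
 *	if (ret > 0)
 *		add_rchar(current, ret);
 *	inc_syscr(current);
 */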
2133
2134#ifdef CONFIG_SMP
2135void migration_init(void);
2136#else
2137static inline void migration_init(void)
2138{
2139}
2140#endif
2141
2142#ifndef TASK_SIZE_OF
2143#define TASK_SIZE_OF(tsk)	TASK_SIZE
2144#endif
2145
2146#endif /* __KERNEL__ */
2147
2148#endif
2149