core.c revision f319da0c6894fcf55e21320e40506418a2aad629
1/*
2 *  kernel/sched/core.c
3 *
4 *  Kernel scheduler and related syscalls
5 *
6 *  Copyright (C) 1991-2002  Linus Torvalds
7 *
8 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
9 *		make semaphores SMP safe
10 *  1998-11-19	Implemented schedule_timeout() and related stuff
11 *		by Andrea Arcangeli
12 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
13 *		hybrid priority-list and round-robin design with
14 *		an array-switch method of distributing timeslices
15 *		and per-CPU runqueues.  Cleanups and useful suggestions
16 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
17 *  2003-09-03	Interactivity tuning by Con Kolivas.
18 *  2004-04-02	Scheduler domains code by Nick Piggin
19 *  2007-04-15  Work begun on replacing all interactivity tuning with a
20 *              fair scheduling design by Con Kolivas.
21 *  2007-05-05  Load balancing (smp-nice) and other improvements
22 *              by Peter Williams
23 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
24 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
25 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 *              Thomas Gleixner, Mike Kravetz
27 */
28
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/nmi.h>
32#include <linux/init.h>
33#include <linux/uaccess.h>
34#include <linux/highmem.h>
35#include <asm/mmu_context.h>
36#include <linux/interrupt.h>
37#include <linux/capability.h>
38#include <linux/completion.h>
39#include <linux/kernel_stat.h>
40#include <linux/debug_locks.h>
41#include <linux/perf_event.h>
42#include <linux/security.h>
43#include <linux/notifier.h>
44#include <linux/profile.h>
45#include <linux/freezer.h>
46#include <linux/vmalloc.h>
47#include <linux/blkdev.h>
48#include <linux/delay.h>
49#include <linux/pid_namespace.h>
50#include <linux/smp.h>
51#include <linux/threads.h>
52#include <linux/timer.h>
53#include <linux/rcupdate.h>
54#include <linux/cpu.h>
55#include <linux/cpuset.h>
56#include <linux/percpu.h>
57#include <linux/proc_fs.h>
58#include <linux/seq_file.h>
59#include <linux/sysctl.h>
60#include <linux/syscalls.h>
61#include <linux/times.h>
62#include <linux/tsacct_kern.h>
63#include <linux/kprobes.h>
64#include <linux/delayacct.h>
65#include <linux/unistd.h>
66#include <linux/pagemap.h>
67#include <linux/hrtimer.h>
68#include <linux/tick.h>
69#include <linux/debugfs.h>
70#include <linux/ctype.h>
71#include <linux/ftrace.h>
72#include <linux/slab.h>
73#include <linux/init_task.h>
74#include <linux/binfmts.h>
75
76#include <asm/switch_to.h>
77#include <asm/tlb.h>
78#include <asm/irq_regs.h>
79#include <asm/mutex.h>
80#ifdef CONFIG_PARAVIRT
81#include <asm/paravirt.h>
82#endif
83
84#include "sched.h"
85#include "../workqueue_sched.h"
86#include "../smpboot.h"
87
88#define CREATE_TRACE_POINTS
89#include <trace/events/sched.h>
90
91void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
92{
93	unsigned long delta;
94	ktime_t soft, hard, now;
95
96	for (;;) {
97		if (hrtimer_active(period_timer))
98			break;
99
100		now = hrtimer_cb_get_time(period_timer);
101		hrtimer_forward(period_timer, now, period);
102
103		soft = hrtimer_get_softexpires(period_timer);
104		hard = hrtimer_get_expires(period_timer);
105		delta = ktime_to_ns(ktime_sub(hard, soft));
106		__hrtimer_start_range_ns(period_timer, soft, delta,
107					 HRTIMER_MODE_ABS_PINNED, 0);
108	}
109}
110
111DEFINE_MUTEX(sched_domains_mutex);
112DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
113
114static void update_rq_clock_task(struct rq *rq, s64 delta);
115
116void update_rq_clock(struct rq *rq)
117{
118	s64 delta;
119
120	if (rq->skip_clock_update > 0)
121		return;
122
123	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
124	rq->clock += delta;
125	update_rq_clock_task(rq, delta);
126}
127
128/*
129 * Debugging: various feature bits
130 */
131
132#define SCHED_FEAT(name, enabled)	\
133	(1UL << __SCHED_FEAT_##name) * enabled |
134
135const_debug unsigned int sysctl_sched_features =
136#include "features.h"
137	0;
138
139#undef SCHED_FEAT
140
141#ifdef CONFIG_SCHED_DEBUG
142#define SCHED_FEAT(name, enabled)	\
143	#name ,
144
145static const char * const sched_feat_names[] = {
146#include "features.h"
147};
148
149#undef SCHED_FEAT
150
151static int sched_feat_show(struct seq_file *m, void *v)
152{
153	int i;
154
155	for (i = 0; i < __SCHED_FEAT_NR; i++) {
156		if (!(sysctl_sched_features & (1UL << i)))
157			seq_puts(m, "NO_");
158		seq_printf(m, "%s ", sched_feat_names[i]);
159	}
160	seq_puts(m, "\n");
161
162	return 0;
163}
164
165#ifdef HAVE_JUMP_LABEL
166
167#define jump_label_key__true  STATIC_KEY_INIT_TRUE
168#define jump_label_key__false STATIC_KEY_INIT_FALSE
169
170#define SCHED_FEAT(name, enabled)	\
171	jump_label_key__##enabled ,
172
173struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
174#include "features.h"
175};
176
177#undef SCHED_FEAT
178
179static void sched_feat_disable(int i)
180{
181	if (static_key_enabled(&sched_feat_keys[i]))
182		static_key_slow_dec(&sched_feat_keys[i]);
183}
184
185static void sched_feat_enable(int i)
186{
187	if (!static_key_enabled(&sched_feat_keys[i]))
188		static_key_slow_inc(&sched_feat_keys[i]);
189}
190#else
191static void sched_feat_disable(int i) { };
192static void sched_feat_enable(int i) { };
193#endif /* HAVE_JUMP_LABEL */
194
195static ssize_t
196sched_feat_write(struct file *filp, const char __user *ubuf,
197		size_t cnt, loff_t *ppos)
198{
199	char buf[64];
200	char *cmp;
201	int neg = 0;
202	int i;
203
204	if (cnt > 63)
205		cnt = 63;
206
207	if (copy_from_user(&buf, ubuf, cnt))
208		return -EFAULT;
209
210	buf[cnt] = 0;
211	cmp = strstrip(buf);
212
213	if (strncmp(cmp, "NO_", 3) == 0) {
214		neg = 1;
215		cmp += 3;
216	}
217
218	for (i = 0; i < __SCHED_FEAT_NR; i++) {
219		if (strcmp(cmp, sched_feat_names[i]) == 0) {
220			if (neg) {
221				sysctl_sched_features &= ~(1UL << i);
222				sched_feat_disable(i);
223			} else {
224				sysctl_sched_features |= (1UL << i);
225				sched_feat_enable(i);
226			}
227			break;
228		}
229	}
230
231	if (i == __SCHED_FEAT_NR)
232		return -EINVAL;
233
234	*ppos += cnt;
235
236	return cnt;
237}
238
239static int sched_feat_open(struct inode *inode, struct file *filp)
240{
241	return single_open(filp, sched_feat_show, NULL);
242}
243
244static const struct file_operations sched_feat_fops = {
245	.open		= sched_feat_open,
246	.write		= sched_feat_write,
247	.read		= seq_read,
248	.llseek		= seq_lseek,
249	.release	= single_release,
250};
251
252static __init int sched_init_debug(void)
253{
254	debugfs_create_file("sched_features", 0644, NULL, NULL,
255			&sched_feat_fops);
256
257	return 0;
258}
259late_initcall(sched_init_debug);
260#endif /* CONFIG_SCHED_DEBUG */
261
262/*
263 * Number of tasks to iterate in a single balance run.
264 * Limited because this is done with IRQs disabled.
265 */
266const_debug unsigned int sysctl_sched_nr_migrate = 32;
267
268/*
269 * period over which we average the RT time consumption, measured
270 * in ms.
271 *
272 * default: 1s
273 */
274const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
275
276/*
277 * period over which we measure -rt task cpu usage in us.
278 * default: 1s
279 */
280unsigned int sysctl_sched_rt_period = 1000000;
281
282__read_mostly int scheduler_running;
283
284/*
285 * part of the period that we allow rt tasks to run in us.
286 * default: 0.95s
287 */
288int sysctl_sched_rt_runtime = 950000;
289
290
291
292/*
293 * __task_rq_lock - lock the rq @p resides on.
294 */
295static inline struct rq *__task_rq_lock(struct task_struct *p)
296	__acquires(rq->lock)
297{
298	struct rq *rq;
299
300	lockdep_assert_held(&p->pi_lock);
301
302	for (;;) {
303		rq = task_rq(p);
304		raw_spin_lock(&rq->lock);
305		if (likely(rq == task_rq(p)))
306			return rq;
307		raw_spin_unlock(&rq->lock);
308	}
309}
310
311/*
312 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
313 */
314static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
315	__acquires(p->pi_lock)
316	__acquires(rq->lock)
317{
318	struct rq *rq;
319
320	for (;;) {
321		raw_spin_lock_irqsave(&p->pi_lock, *flags);
322		rq = task_rq(p);
323		raw_spin_lock(&rq->lock);
324		if (likely(rq == task_rq(p)))
325			return rq;
326		raw_spin_unlock(&rq->lock);
327		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
328	}
329}
330
331static void __task_rq_unlock(struct rq *rq)
332	__releases(rq->lock)
333{
334	raw_spin_unlock(&rq->lock);
335}
336
337static inline void
338task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
339	__releases(rq->lock)
340	__releases(p->pi_lock)
341{
342	raw_spin_unlock(&rq->lock);
343	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
344}
345
346/*
347 * this_rq_lock - lock this runqueue and disable interrupts.
348 */
349static struct rq *this_rq_lock(void)
350	__acquires(rq->lock)
351{
352	struct rq *rq;
353
354	local_irq_disable();
355	rq = this_rq();
356	raw_spin_lock(&rq->lock);
357
358	return rq;
359}
360
361#ifdef CONFIG_SCHED_HRTICK
362/*
363 * Use HR-timers to deliver accurate preemption points.
364 *
365 * Its all a bit involved since we cannot program an hrt while holding the
366 * rq->lock. So what we do is store a state in in rq->hrtick_* and ask for a
367 * reschedule event.
368 *
369 * When we get rescheduled we reprogram the hrtick_timer outside of the
370 * rq->lock.
371 */
372
373static void hrtick_clear(struct rq *rq)
374{
375	if (hrtimer_active(&rq->hrtick_timer))
376		hrtimer_cancel(&rq->hrtick_timer);
377}
378
379/*
380 * High-resolution timer tick.
381 * Runs from hardirq context with interrupts disabled.
382 */
383static enum hrtimer_restart hrtick(struct hrtimer *timer)
384{
385	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
386
387	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
388
389	raw_spin_lock(&rq->lock);
390	update_rq_clock(rq);
391	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
392	raw_spin_unlock(&rq->lock);
393
394	return HRTIMER_NORESTART;
395}
396
397#ifdef CONFIG_SMP
398/*
399 * called from hardirq (IPI) context
400 */
401static void __hrtick_start(void *arg)
402{
403	struct rq *rq = arg;
404
405	raw_spin_lock(&rq->lock);
406	hrtimer_restart(&rq->hrtick_timer);
407	rq->hrtick_csd_pending = 0;
408	raw_spin_unlock(&rq->lock);
409}
410
411/*
412 * Called to set the hrtick timer state.
413 *
414 * called with rq->lock held and irqs disabled
415 */
416void hrtick_start(struct rq *rq, u64 delay)
417{
418	struct hrtimer *timer = &rq->hrtick_timer;
419	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
420
421	hrtimer_set_expires(timer, time);
422
423	if (rq == this_rq()) {
424		hrtimer_restart(timer);
425	} else if (!rq->hrtick_csd_pending) {
426		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
427		rq->hrtick_csd_pending = 1;
428	}
429}
430
431static int
432hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
433{
434	int cpu = (int)(long)hcpu;
435
436	switch (action) {
437	case CPU_UP_CANCELED:
438	case CPU_UP_CANCELED_FROZEN:
439	case CPU_DOWN_PREPARE:
440	case CPU_DOWN_PREPARE_FROZEN:
441	case CPU_DEAD:
442	case CPU_DEAD_FROZEN:
443		hrtick_clear(cpu_rq(cpu));
444		return NOTIFY_OK;
445	}
446
447	return NOTIFY_DONE;
448}
449
450static __init void init_hrtick(void)
451{
452	hotcpu_notifier(hotplug_hrtick, 0);
453}
454#else
455/*
456 * Called to set the hrtick timer state.
457 *
458 * called with rq->lock held and irqs disabled
459 */
460void hrtick_start(struct rq *rq, u64 delay)
461{
462	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
463			HRTIMER_MODE_REL_PINNED, 0);
464}
465
466static inline void init_hrtick(void)
467{
468}
469#endif /* CONFIG_SMP */
470
471static void init_rq_hrtick(struct rq *rq)
472{
473#ifdef CONFIG_SMP
474	rq->hrtick_csd_pending = 0;
475
476	rq->hrtick_csd.flags = 0;
477	rq->hrtick_csd.func = __hrtick_start;
478	rq->hrtick_csd.info = rq;
479#endif
480
481	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
482	rq->hrtick_timer.function = hrtick;
483}
484#else	/* CONFIG_SCHED_HRTICK */
485static inline void hrtick_clear(struct rq *rq)
486{
487}
488
489static inline void init_rq_hrtick(struct rq *rq)
490{
491}
492
493static inline void init_hrtick(void)
494{
495}
496#endif	/* CONFIG_SCHED_HRTICK */
497
498/*
499 * resched_task - mark a task 'to be rescheduled now'.
500 *
501 * On UP this means the setting of the need_resched flag, on SMP it
502 * might also involve a cross-CPU call to trigger the scheduler on
503 * the target CPU.
504 */
505#ifdef CONFIG_SMP
506
507#ifndef tsk_is_polling
508#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
509#endif
510
511void resched_task(struct task_struct *p)
512{
513	int cpu;
514
515	assert_raw_spin_locked(&task_rq(p)->lock);
516
517	if (test_tsk_need_resched(p))
518		return;
519
520	set_tsk_need_resched(p);
521
522	cpu = task_cpu(p);
523	if (cpu == smp_processor_id())
524		return;
525
526	/* NEED_RESCHED must be visible before we test polling */
527	smp_mb();
528	if (!tsk_is_polling(p))
529		smp_send_reschedule(cpu);
530}
531
532void resched_cpu(int cpu)
533{
534	struct rq *rq = cpu_rq(cpu);
535	unsigned long flags;
536
537	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
538		return;
539	resched_task(cpu_curr(cpu));
540	raw_spin_unlock_irqrestore(&rq->lock, flags);
541}
542
543#ifdef CONFIG_NO_HZ
544/*
545 * In the semi idle case, use the nearest busy cpu for migrating timers
546 * from an idle cpu.  This is good for power-savings.
547 *
548 * We don't do similar optimization for completely idle system, as
549 * selecting an idle cpu will add more delays to the timers than intended
550 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
551 */
552int get_nohz_timer_target(void)
553{
554	int cpu = smp_processor_id();
555	int i;
556	struct sched_domain *sd;
557
558	rcu_read_lock();
559	for_each_domain(cpu, sd) {
560		for_each_cpu(i, sched_domain_span(sd)) {
561			if (!idle_cpu(i)) {
562				cpu = i;
563				goto unlock;
564			}
565		}
566	}
567unlock:
568	rcu_read_unlock();
569	return cpu;
570}
571/*
572 * When add_timer_on() enqueues a timer into the timer wheel of an
573 * idle CPU then this timer might expire before the next timer event
574 * which is scheduled to wake up that CPU. In case of a completely
575 * idle system the next event might even be infinite time into the
576 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
577 * leaves the inner idle loop so the newly added timer is taken into
578 * account when the CPU goes back to idle and evaluates the timer
579 * wheel for the next timer event.
580 */
581void wake_up_idle_cpu(int cpu)
582{
583	struct rq *rq = cpu_rq(cpu);
584
585	if (cpu == smp_processor_id())
586		return;
587
588	/*
589	 * This is safe, as this function is called with the timer
590	 * wheel base lock of (cpu) held. When the CPU is on the way
591	 * to idle and has not yet set rq->curr to idle then it will
592	 * be serialized on the timer wheel base lock and take the new
593	 * timer into account automatically.
594	 */
595	if (rq->curr != rq->idle)
596		return;
597
598	/*
599	 * We can set TIF_RESCHED on the idle task of the other CPU
600	 * lockless. The worst case is that the other CPU runs the
601	 * idle task through an additional NOOP schedule()
602	 */
603	set_tsk_need_resched(rq->idle);
604
605	/* NEED_RESCHED must be visible before we test polling */
606	smp_mb();
607	if (!tsk_is_polling(rq->idle))
608		smp_send_reschedule(cpu);
609}
610
611static inline bool got_nohz_idle_kick(void)
612{
613	int cpu = smp_processor_id();
614	return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
615}
616
617#else /* CONFIG_NO_HZ */
618
619static inline bool got_nohz_idle_kick(void)
620{
621	return false;
622}
623
624#endif /* CONFIG_NO_HZ */
625
626void sched_avg_update(struct rq *rq)
627{
628	s64 period = sched_avg_period();
629
630	while ((s64)(rq->clock - rq->age_stamp) > period) {
631		/*
632		 * Inline assembly required to prevent the compiler
633		 * optimising this loop into a divmod call.
634		 * See __iter_div_u64_rem() for another example of this.
635		 */
636		asm("" : "+rm" (rq->age_stamp));
637		rq->age_stamp += period;
638		rq->rt_avg /= 2;
639	}
640}
641
642#else /* !CONFIG_SMP */
643void resched_task(struct task_struct *p)
644{
645	assert_raw_spin_locked(&task_rq(p)->lock);
646	set_tsk_need_resched(p);
647}
648#endif /* CONFIG_SMP */
649
650#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
651			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
652/*
653 * Iterate task_group tree rooted at *from, calling @down when first entering a
654 * node and @up when leaving it for the final time.
655 *
656 * Caller must hold rcu_lock or sufficient equivalent.
657 */
658int walk_tg_tree_from(struct task_group *from,
659			     tg_visitor down, tg_visitor up, void *data)
660{
661	struct task_group *parent, *child;
662	int ret;
663
664	parent = from;
665
666down:
667	ret = (*down)(parent, data);
668	if (ret)
669		goto out;
670	list_for_each_entry_rcu(child, &parent->children, siblings) {
671		parent = child;
672		goto down;
673
674up:
675		continue;
676	}
677	ret = (*up)(parent, data);
678	if (ret || parent == from)
679		goto out;
680
681	child = parent;
682	parent = parent->parent;
683	if (parent)
684		goto up;
685out:
686	return ret;
687}
688
689int tg_nop(struct task_group *tg, void *data)
690{
691	return 0;
692}
693#endif
694
695static void set_load_weight(struct task_struct *p)
696{
697	int prio = p->static_prio - MAX_RT_PRIO;
698	struct load_weight *load = &p->se.load;
699
700	/*
701	 * SCHED_IDLE tasks get minimal weight:
702	 */
703	if (p->policy == SCHED_IDLE) {
704		load->weight = scale_load(WEIGHT_IDLEPRIO);
705		load->inv_weight = WMULT_IDLEPRIO;
706		return;
707	}
708
709	load->weight = scale_load(prio_to_weight[prio]);
710	load->inv_weight = prio_to_wmult[prio];
711}
712
713static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
714{
715	update_rq_clock(rq);
716	sched_info_queued(p);
717	p->sched_class->enqueue_task(rq, p, flags);
718}
719
720static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
721{
722	update_rq_clock(rq);
723	sched_info_dequeued(p);
724	p->sched_class->dequeue_task(rq, p, flags);
725}
726
727void activate_task(struct rq *rq, struct task_struct *p, int flags)
728{
729	if (task_contributes_to_load(p))
730		rq->nr_uninterruptible--;
731
732	enqueue_task(rq, p, flags);
733}
734
735void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
736{
737	if (task_contributes_to_load(p))
738		rq->nr_uninterruptible++;
739
740	dequeue_task(rq, p, flags);
741}
742
743#ifdef CONFIG_IRQ_TIME_ACCOUNTING
744
745/*
746 * There are no locks covering percpu hardirq/softirq time.
747 * They are only modified in account_system_vtime, on corresponding CPU
748 * with interrupts disabled. So, writes are safe.
749 * They are read and saved off onto struct rq in update_rq_clock().
750 * This may result in other CPU reading this CPU's irq time and can
751 * race with irq/account_system_vtime on this CPU. We would either get old
752 * or new value with a side effect of accounting a slice of irq time to wrong
753 * task when irq is in progress while we read rq->clock. That is a worthy
754 * compromise in place of having locks on each irq in account_system_time.
755 */
756static DEFINE_PER_CPU(u64, cpu_hardirq_time);
757static DEFINE_PER_CPU(u64, cpu_softirq_time);
758
759static DEFINE_PER_CPU(u64, irq_start_time);
760static int sched_clock_irqtime;
761
762void enable_sched_clock_irqtime(void)
763{
764	sched_clock_irqtime = 1;
765}
766
767void disable_sched_clock_irqtime(void)
768{
769	sched_clock_irqtime = 0;
770}
771
772#ifndef CONFIG_64BIT
773static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
774
775static inline void irq_time_write_begin(void)
776{
777	__this_cpu_inc(irq_time_seq.sequence);
778	smp_wmb();
779}
780
781static inline void irq_time_write_end(void)
782{
783	smp_wmb();
784	__this_cpu_inc(irq_time_seq.sequence);
785}
786
787static inline u64 irq_time_read(int cpu)
788{
789	u64 irq_time;
790	unsigned seq;
791
792	do {
793		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
794		irq_time = per_cpu(cpu_softirq_time, cpu) +
795			   per_cpu(cpu_hardirq_time, cpu);
796	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
797
798	return irq_time;
799}
800#else /* CONFIG_64BIT */
801static inline void irq_time_write_begin(void)
802{
803}
804
805static inline void irq_time_write_end(void)
806{
807}
808
809static inline u64 irq_time_read(int cpu)
810{
811	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
812}
813#endif /* CONFIG_64BIT */
814
815/*
816 * Called before incrementing preempt_count on {soft,}irq_enter
817 * and before decrementing preempt_count on {soft,}irq_exit.
818 */
819void account_system_vtime(struct task_struct *curr)
820{
821	unsigned long flags;
822	s64 delta;
823	int cpu;
824
825	if (!sched_clock_irqtime)
826		return;
827
828	local_irq_save(flags);
829
830	cpu = smp_processor_id();
831	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
832	__this_cpu_add(irq_start_time, delta);
833
834	irq_time_write_begin();
835	/*
836	 * We do not account for softirq time from ksoftirqd here.
837	 * We want to continue accounting softirq time to ksoftirqd thread
838	 * in that case, so as not to confuse scheduler with a special task
839	 * that do not consume any time, but still wants to run.
840	 */
841	if (hardirq_count())
842		__this_cpu_add(cpu_hardirq_time, delta);
843	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
844		__this_cpu_add(cpu_softirq_time, delta);
845
846	irq_time_write_end();
847	local_irq_restore(flags);
848}
849EXPORT_SYMBOL_GPL(account_system_vtime);
850
851#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
852
853#ifdef CONFIG_PARAVIRT
854static inline u64 steal_ticks(u64 steal)
855{
856	if (unlikely(steal > NSEC_PER_SEC))
857		return div_u64(steal, TICK_NSEC);
858
859	return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
860}
861#endif
862
863static void update_rq_clock_task(struct rq *rq, s64 delta)
864{
865/*
866 * In theory, the compile should just see 0 here, and optimize out the call
867 * to sched_rt_avg_update. But I don't trust it...
868 */
869#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
870	s64 steal = 0, irq_delta = 0;
871#endif
872#ifdef CONFIG_IRQ_TIME_ACCOUNTING
873	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
874
875	/*
876	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
877	 * this case when a previous update_rq_clock() happened inside a
878	 * {soft,}irq region.
879	 *
880	 * When this happens, we stop ->clock_task and only update the
881	 * prev_irq_time stamp to account for the part that fit, so that a next
882	 * update will consume the rest. This ensures ->clock_task is
883	 * monotonic.
884	 *
885	 * It does however cause some slight miss-attribution of {soft,}irq
886	 * time, a more accurate solution would be to update the irq_time using
887	 * the current rq->clock timestamp, except that would require using
888	 * atomic ops.
889	 */
890	if (irq_delta > delta)
891		irq_delta = delta;
892
893	rq->prev_irq_time += irq_delta;
894	delta -= irq_delta;
895#endif
896#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
897	if (static_key_false((&paravirt_steal_rq_enabled))) {
898		u64 st;
899
900		steal = paravirt_steal_clock(cpu_of(rq));
901		steal -= rq->prev_steal_time_rq;
902
903		if (unlikely(steal > delta))
904			steal = delta;
905
906		st = steal_ticks(steal);
907		steal = st * TICK_NSEC;
908
909		rq->prev_steal_time_rq += steal;
910
911		delta -= steal;
912	}
913#endif
914
915	rq->clock_task += delta;
916
917#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
918	if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
919		sched_rt_avg_update(rq, irq_delta + steal);
920#endif
921}
922
923#ifdef CONFIG_IRQ_TIME_ACCOUNTING
924static int irqtime_account_hi_update(void)
925{
926	u64 *cpustat = kcpustat_this_cpu->cpustat;
927	unsigned long flags;
928	u64 latest_ns;
929	int ret = 0;
930
931	local_irq_save(flags);
932	latest_ns = this_cpu_read(cpu_hardirq_time);
933	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
934		ret = 1;
935	local_irq_restore(flags);
936	return ret;
937}
938
939static int irqtime_account_si_update(void)
940{
941	u64 *cpustat = kcpustat_this_cpu->cpustat;
942	unsigned long flags;
943	u64 latest_ns;
944	int ret = 0;
945
946	local_irq_save(flags);
947	latest_ns = this_cpu_read(cpu_softirq_time);
948	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
949		ret = 1;
950	local_irq_restore(flags);
951	return ret;
952}
953
954#else /* CONFIG_IRQ_TIME_ACCOUNTING */
955
956#define sched_clock_irqtime	(0)
957
958#endif
959
960void sched_set_stop_task(int cpu, struct task_struct *stop)
961{
962	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
963	struct task_struct *old_stop = cpu_rq(cpu)->stop;
964
965	if (stop) {
966		/*
967		 * Make it appear like a SCHED_FIFO task, its something
968		 * userspace knows about and won't get confused about.
969		 *
970		 * Also, it will make PI more or less work without too
971		 * much confusion -- but then, stop work should not
972		 * rely on PI working anyway.
973		 */
974		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
975
976		stop->sched_class = &stop_sched_class;
977	}
978
979	cpu_rq(cpu)->stop = stop;
980
981	if (old_stop) {
982		/*
983		 * Reset it back to a normal scheduling class so that
984		 * it can die in pieces.
985		 */
986		old_stop->sched_class = &rt_sched_class;
987	}
988}
989
990/*
991 * __normal_prio - return the priority that is based on the static prio
992 */
993static inline int __normal_prio(struct task_struct *p)
994{
995	return p->static_prio;
996}
997
998/*
999 * Calculate the expected normal priority: i.e. priority
1000 * without taking RT-inheritance into account. Might be
1001 * boosted by interactivity modifiers. Changes upon fork,
1002 * setprio syscalls, and whenever the interactivity
1003 * estimator recalculates.
1004 */
1005static inline int normal_prio(struct task_struct *p)
1006{
1007	int prio;
1008
1009	if (task_has_rt_policy(p))
1010		prio = MAX_RT_PRIO-1 - p->rt_priority;
1011	else
1012		prio = __normal_prio(p);
1013	return prio;
1014}
1015
1016/*
1017 * Calculate the current priority, i.e. the priority
1018 * taken into account by the scheduler. This value might
1019 * be boosted by RT tasks, or might be boosted by
1020 * interactivity modifiers. Will be RT if the task got
1021 * RT-boosted. If not then it returns p->normal_prio.
1022 */
1023static int effective_prio(struct task_struct *p)
1024{
1025	p->normal_prio = normal_prio(p);
1026	/*
1027	 * If we are RT tasks or we were boosted to RT priority,
1028	 * keep the priority unchanged. Otherwise, update priority
1029	 * to the normal priority:
1030	 */
1031	if (!rt_prio(p->prio))
1032		return p->normal_prio;
1033	return p->prio;
1034}
1035
1036/**
1037 * task_curr - is this task currently executing on a CPU?
1038 * @p: the task in question.
1039 */
1040inline int task_curr(const struct task_struct *p)
1041{
1042	return cpu_curr(task_cpu(p)) == p;
1043}
1044
1045static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1046				       const struct sched_class *prev_class,
1047				       int oldprio)
1048{
1049	if (prev_class != p->sched_class) {
1050		if (prev_class->switched_from)
1051			prev_class->switched_from(rq, p);
1052		p->sched_class->switched_to(rq, p);
1053	} else if (oldprio != p->prio)
1054		p->sched_class->prio_changed(rq, p, oldprio);
1055}
1056
1057void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
1058{
1059	const struct sched_class *class;
1060
1061	if (p->sched_class == rq->curr->sched_class) {
1062		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
1063	} else {
1064		for_each_class(class) {
1065			if (class == rq->curr->sched_class)
1066				break;
1067			if (class == p->sched_class) {
1068				resched_task(rq->curr);
1069				break;
1070			}
1071		}
1072	}
1073
1074	/*
1075	 * A queue event has occurred, and we're going to schedule.  In
1076	 * this case, we can save a useless back to back clock update.
1077	 */
1078	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
1079		rq->skip_clock_update = 1;
1080}
1081
1082#ifdef CONFIG_SMP
1083void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
1084{
1085#ifdef CONFIG_SCHED_DEBUG
1086	/*
1087	 * We should never call set_task_cpu() on a blocked task,
1088	 * ttwu() will sort out the placement.
1089	 */
1090	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
1091			!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
1092
1093#ifdef CONFIG_LOCKDEP
1094	/*
1095	 * The caller should hold either p->pi_lock or rq->lock, when changing
1096	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
1097	 *
1098	 * sched_move_task() holds both and thus holding either pins the cgroup,
1099	 * see task_group().
1100	 *
1101	 * Furthermore, all task_rq users should acquire both locks, see
1102	 * task_rq_lock().
1103	 */
1104	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
1105				      lockdep_is_held(&task_rq(p)->lock)));
1106#endif
1107#endif
1108
1109	trace_sched_migrate_task(p, new_cpu);
1110
1111	if (task_cpu(p) != new_cpu) {
1112		p->se.nr_migrations++;
1113		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
1114	}
1115
1116	__set_task_cpu(p, new_cpu);
1117}
1118
1119struct migration_arg {
1120	struct task_struct *task;
1121	int dest_cpu;
1122};
1123
1124static int migration_cpu_stop(void *data);
1125
1126/*
1127 * wait_task_inactive - wait for a thread to unschedule.
1128 *
1129 * If @match_state is nonzero, it's the @p->state value just checked and
1130 * not expected to change.  If it changes, i.e. @p might have woken up,
1131 * then return zero.  When we succeed in waiting for @p to be off its CPU,
1132 * we return a positive number (its total switch count).  If a second call
1133 * a short while later returns the same number, the caller can be sure that
1134 * @p has remained unscheduled the whole time.
1135 *
1136 * The caller must ensure that the task *will* unschedule sometime soon,
1137 * else this function might spin for a *long* time. This function can't
1138 * be called with interrupts off, or it may introduce deadlock with
1139 * smp_call_function() if an IPI is sent by the same process we are
1140 * waiting to become inactive.
1141 */
1142unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1143{
1144	unsigned long flags;
1145	int running, on_rq;
1146	unsigned long ncsw;
1147	struct rq *rq;
1148
1149	for (;;) {
1150		/*
1151		 * We do the initial early heuristics without holding
1152		 * any task-queue locks at all. We'll only try to get
1153		 * the runqueue lock when things look like they will
1154		 * work out!
1155		 */
1156		rq = task_rq(p);
1157
1158		/*
1159		 * If the task is actively running on another CPU
1160		 * still, just relax and busy-wait without holding
1161		 * any locks.
1162		 *
1163		 * NOTE! Since we don't hold any locks, it's not
1164		 * even sure that "rq" stays as the right runqueue!
1165		 * But we don't care, since "task_running()" will
1166		 * return false if the runqueue has changed and p
1167		 * is actually now running somewhere else!
1168		 */
1169		while (task_running(rq, p)) {
1170			if (match_state && unlikely(p->state != match_state))
1171				return 0;
1172			cpu_relax();
1173		}
1174
1175		/*
1176		 * Ok, time to look more closely! We need the rq
1177		 * lock now, to be *sure*. If we're wrong, we'll
1178		 * just go back and repeat.
1179		 */
1180		rq = task_rq_lock(p, &flags);
1181		trace_sched_wait_task(p);
1182		running = task_running(rq, p);
1183		on_rq = p->on_rq;
1184		ncsw = 0;
1185		if (!match_state || p->state == match_state)
1186			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
1187		task_rq_unlock(rq, p, &flags);
1188
1189		/*
1190		 * If it changed from the expected state, bail out now.
1191		 */
1192		if (unlikely(!ncsw))
1193			break;
1194
1195		/*
1196		 * Was it really running after all now that we
1197		 * checked with the proper locks actually held?
1198		 *
1199		 * Oops. Go back and try again..
1200		 */
1201		if (unlikely(running)) {
1202			cpu_relax();
1203			continue;
1204		}
1205
1206		/*
1207		 * It's not enough that it's not actively running,
1208		 * it must be off the runqueue _entirely_, and not
1209		 * preempted!
1210		 *
1211		 * So if it was still runnable (but just not actively
1212		 * running right now), it's preempted, and we should
1213		 * yield - it could be a while.
1214		 */
1215		if (unlikely(on_rq)) {
1216			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
1217
1218			set_current_state(TASK_UNINTERRUPTIBLE);
1219			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
1220			continue;
1221		}
1222
1223		/*
1224		 * Ahh, all good. It wasn't running, and it wasn't
1225		 * runnable, which means that it will never become
1226		 * running in the future either. We're all done!
1227		 */
1228		break;
1229	}
1230
1231	return ncsw;
1232}
1233
1234/***
1235 * kick_process - kick a running thread to enter/exit the kernel
1236 * @p: the to-be-kicked thread
1237 *
1238 * Cause a process which is running on another CPU to enter
1239 * kernel-mode, without any delay. (to get signals handled.)
1240 *
1241 * NOTE: this function doesn't have to take the runqueue lock,
1242 * because all it wants to ensure is that the remote task enters
1243 * the kernel. If the IPI races and the task has been migrated
1244 * to another CPU then no harm is done and the purpose has been
1245 * achieved as well.
1246 */
1247void kick_process(struct task_struct *p)
1248{
1249	int cpu;
1250
1251	preempt_disable();
1252	cpu = task_cpu(p);
1253	if ((cpu != smp_processor_id()) && task_curr(p))
1254		smp_send_reschedule(cpu);
1255	preempt_enable();
1256}
1257EXPORT_SYMBOL_GPL(kick_process);
1258#endif /* CONFIG_SMP */
1259
1260#ifdef CONFIG_SMP
1261/*
1262 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
1263 */
1264static int select_fallback_rq(int cpu, struct task_struct *p)
1265{
1266	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
1267	enum { cpuset, possible, fail } state = cpuset;
1268	int dest_cpu;
1269
1270	/* Look for allowed, online CPU in same node. */
1271	for_each_cpu(dest_cpu, nodemask) {
1272		if (!cpu_online(dest_cpu))
1273			continue;
1274		if (!cpu_active(dest_cpu))
1275			continue;
1276		if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
1277			return dest_cpu;
1278	}
1279
1280	for (;;) {
1281		/* Any allowed, online CPU? */
1282		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
1283			if (!cpu_online(dest_cpu))
1284				continue;
1285			if (!cpu_active(dest_cpu))
1286				continue;
1287			goto out;
1288		}
1289
1290		switch (state) {
1291		case cpuset:
1292			/* No more Mr. Nice Guy. */
1293			cpuset_cpus_allowed_fallback(p);
1294			state = possible;
1295			break;
1296
1297		case possible:
1298			do_set_cpus_allowed(p, cpu_possible_mask);
1299			state = fail;
1300			break;
1301
1302		case fail:
1303			BUG();
1304			break;
1305		}
1306	}
1307
1308out:
1309	if (state != cpuset) {
1310		/*
1311		 * Don't tell them about moving exiting tasks or
1312		 * kernel threads (both mm NULL), since they never
1313		 * leave kernel.
1314		 */
1315		if (p->mm && printk_ratelimit()) {
1316			printk_sched("process %d (%s) no longer affine to cpu%d\n",
1317					task_pid_nr(p), p->comm, cpu);
1318		}
1319	}
1320
1321	return dest_cpu;
1322}
1323
1324/*
1325 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
1326 */
1327static inline
1328int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
1329{
1330	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
1331
1332	/*
1333	 * In order not to call set_task_cpu() on a blocking task we need
1334	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
1335	 * cpu.
1336	 *
1337	 * Since this is common to all placement strategies, this lives here.
1338	 *
1339	 * [ this allows ->select_task() to simply return task_cpu(p) and
1340	 *   not worry about this generic constraint ]
1341	 */
1342	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
1343		     !cpu_online(cpu)))
1344		cpu = select_fallback_rq(task_cpu(p), p);
1345
1346	return cpu;
1347}
1348
1349static void update_avg(u64 *avg, u64 sample)
1350{
1351	s64 diff = sample - *avg;
1352	*avg += diff >> 3;
1353}
1354#endif
1355
1356static void
1357ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
1358{
1359#ifdef CONFIG_SCHEDSTATS
1360	struct rq *rq = this_rq();
1361
1362#ifdef CONFIG_SMP
1363	int this_cpu = smp_processor_id();
1364
1365	if (cpu == this_cpu) {
1366		schedstat_inc(rq, ttwu_local);
1367		schedstat_inc(p, se.statistics.nr_wakeups_local);
1368	} else {
1369		struct sched_domain *sd;
1370
1371		schedstat_inc(p, se.statistics.nr_wakeups_remote);
1372		rcu_read_lock();
1373		for_each_domain(this_cpu, sd) {
1374			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
1375				schedstat_inc(sd, ttwu_wake_remote);
1376				break;
1377			}
1378		}
1379		rcu_read_unlock();
1380	}
1381
1382	if (wake_flags & WF_MIGRATED)
1383		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
1384
1385#endif /* CONFIG_SMP */
1386
1387	schedstat_inc(rq, ttwu_count);
1388	schedstat_inc(p, se.statistics.nr_wakeups);
1389
1390	if (wake_flags & WF_SYNC)
1391		schedstat_inc(p, se.statistics.nr_wakeups_sync);
1392
1393#endif /* CONFIG_SCHEDSTATS */
1394}
1395
1396static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
1397{
1398	activate_task(rq, p, en_flags);
1399	p->on_rq = 1;
1400
1401	/* if a worker is waking up, notify workqueue */
1402	if (p->flags & PF_WQ_WORKER)
1403		wq_worker_waking_up(p, cpu_of(rq));
1404}
1405
1406/*
1407 * Mark the task runnable and perform wakeup-preemption.
1408 */
1409static void
1410ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
1411{
1412	trace_sched_wakeup(p, true);
1413	check_preempt_curr(rq, p, wake_flags);
1414
1415	p->state = TASK_RUNNING;
1416#ifdef CONFIG_SMP
1417	if (p->sched_class->task_woken)
1418		p->sched_class->task_woken(rq, p);
1419
1420	if (rq->idle_stamp) {
1421		u64 delta = rq->clock - rq->idle_stamp;
1422		u64 max = 2*sysctl_sched_migration_cost;
1423
1424		if (delta > max)
1425			rq->avg_idle = max;
1426		else
1427			update_avg(&rq->avg_idle, delta);
1428		rq->idle_stamp = 0;
1429	}
1430#endif
1431}
1432
1433static void
1434ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
1435{
1436#ifdef CONFIG_SMP
1437	if (p->sched_contributes_to_load)
1438		rq->nr_uninterruptible--;
1439#endif
1440
1441	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
1442	ttwu_do_wakeup(rq, p, wake_flags);
1443}
1444
1445/*
1446 * Called in case the task @p isn't fully descheduled from its runqueue,
1447 * in this case we must do a remote wakeup. Its a 'light' wakeup though,
1448 * since all we need to do is flip p->state to TASK_RUNNING, since
1449 * the task is still ->on_rq.
1450 */
1451static int ttwu_remote(struct task_struct *p, int wake_flags)
1452{
1453	struct rq *rq;
1454	int ret = 0;
1455
1456	rq = __task_rq_lock(p);
1457	if (p->on_rq) {
1458		ttwu_do_wakeup(rq, p, wake_flags);
1459		ret = 1;
1460	}
1461	__task_rq_unlock(rq);
1462
1463	return ret;
1464}
1465
1466#ifdef CONFIG_SMP
1467static void sched_ttwu_pending(void)
1468{
1469	struct rq *rq = this_rq();
1470	struct llist_node *llist = llist_del_all(&rq->wake_list);
1471	struct task_struct *p;
1472
1473	raw_spin_lock(&rq->lock);
1474
1475	while (llist) {
1476		p = llist_entry(llist, struct task_struct, wake_entry);
1477		llist = llist_next(llist);
1478		ttwu_do_activate(rq, p, 0);
1479	}
1480
1481	raw_spin_unlock(&rq->lock);
1482}
1483
1484void scheduler_ipi(void)
1485{
1486	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
1487		return;
1488
1489	/*
1490	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
1491	 * traditionally all their work was done from the interrupt return
1492	 * path. Now that we actually do some work, we need to make sure
1493	 * we do call them.
1494	 *
1495	 * Some archs already do call them, luckily irq_enter/exit nest
1496	 * properly.
1497	 *
1498	 * Arguably we should visit all archs and update all handlers,
1499	 * however a fair share of IPIs are still resched only so this would
1500	 * somewhat pessimize the simple resched case.
1501	 */
1502	irq_enter();
1503	sched_ttwu_pending();
1504
1505	/*
1506	 * Check if someone kicked us for doing the nohz idle load balance.
1507	 */
1508	if (unlikely(got_nohz_idle_kick() && !need_resched())) {
1509		this_rq()->idle_balance = 1;
1510		raise_softirq_irqoff(SCHED_SOFTIRQ);
1511	}
1512	irq_exit();
1513}
1514
1515static void ttwu_queue_remote(struct task_struct *p, int cpu)
1516{
1517	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
1518		smp_send_reschedule(cpu);
1519}
1520
1521#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1522static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
1523{
1524	struct rq *rq;
1525	int ret = 0;
1526
1527	rq = __task_rq_lock(p);
1528	if (p->on_cpu) {
1529		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
1530		ttwu_do_wakeup(rq, p, wake_flags);
1531		ret = 1;
1532	}
1533	__task_rq_unlock(rq);
1534
1535	return ret;
1536
1537}
1538#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
1539
1540bool cpus_share_cache(int this_cpu, int that_cpu)
1541{
1542	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
1543}
1544#endif /* CONFIG_SMP */
1545
1546static void ttwu_queue(struct task_struct *p, int cpu)
1547{
1548	struct rq *rq = cpu_rq(cpu);
1549
1550#if defined(CONFIG_SMP)
1551	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
1552		sched_clock_cpu(cpu); /* sync clocks x-cpu */
1553		ttwu_queue_remote(p, cpu);
1554		return;
1555	}
1556#endif
1557
1558	raw_spin_lock(&rq->lock);
1559	ttwu_do_activate(rq, p, 0);
1560	raw_spin_unlock(&rq->lock);
1561}
1562
1563/**
1564 * try_to_wake_up - wake up a thread
1565 * @p: the thread to be awakened
1566 * @state: the mask of task states that can be woken
1567 * @wake_flags: wake modifier flags (WF_*)
1568 *
1569 * Put it on the run-queue if it's not already there. The "current"
1570 * thread is always on the run-queue (except when the actual
1571 * re-schedule is in progress), and as such you're allowed to do
1572 * the simpler "current->state = TASK_RUNNING" to mark yourself
1573 * runnable without the overhead of this.
1574 *
1575 * Returns %true if @p was woken up, %false if it was already running
1576 * or @state didn't match @p's state.
1577 */
1578static int
1579try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1580{
1581	unsigned long flags;
1582	int cpu, success = 0;
1583
1584	smp_wmb();
1585	raw_spin_lock_irqsave(&p->pi_lock, flags);
1586	if (!(p->state & state))
1587		goto out;
1588
1589	success = 1; /* we're going to change ->state */
1590	cpu = task_cpu(p);
1591
1592	if (p->on_rq && ttwu_remote(p, wake_flags))
1593		goto stat;
1594
1595#ifdef CONFIG_SMP
1596	/*
1597	 * If the owning (remote) cpu is still in the middle of schedule() with
1598	 * this task as prev, wait until its done referencing the task.
1599	 */
1600	while (p->on_cpu) {
1601#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1602		/*
1603		 * In case the architecture enables interrupts in
1604		 * context_switch(), we cannot busy wait, since that
1605		 * would lead to deadlocks when an interrupt hits and
1606		 * tries to wake up @prev. So bail and do a complete
1607		 * remote wakeup.
1608		 */
1609		if (ttwu_activate_remote(p, wake_flags))
1610			goto stat;
1611#else
1612		cpu_relax();
1613#endif
1614	}
1615	/*
1616	 * Pairs with the smp_wmb() in finish_lock_switch().
1617	 */
1618	smp_rmb();
1619
1620	p->sched_contributes_to_load = !!task_contributes_to_load(p);
1621	p->state = TASK_WAKING;
1622
1623	if (p->sched_class->task_waking)
1624		p->sched_class->task_waking(p);
1625
1626	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
1627	if (task_cpu(p) != cpu) {
1628		wake_flags |= WF_MIGRATED;
1629		set_task_cpu(p, cpu);
1630	}
1631#endif /* CONFIG_SMP */
1632
1633	ttwu_queue(p, cpu);
1634stat:
1635	ttwu_stat(p, cpu, wake_flags);
1636out:
1637	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1638
1639	return success;
1640}
1641
1642/**
1643 * try_to_wake_up_local - try to wake up a local task with rq lock held
1644 * @p: the thread to be awakened
1645 *
1646 * Put @p on the run-queue if it's not already there. The caller must
1647 * ensure that this_rq() is locked, @p is bound to this_rq() and not
1648 * the current task.
1649 */
1650static void try_to_wake_up_local(struct task_struct *p)
1651{
1652	struct rq *rq = task_rq(p);
1653
1654	BUG_ON(rq != this_rq());
1655	BUG_ON(p == current);
1656	lockdep_assert_held(&rq->lock);
1657
1658	if (!raw_spin_trylock(&p->pi_lock)) {
1659		raw_spin_unlock(&rq->lock);
1660		raw_spin_lock(&p->pi_lock);
1661		raw_spin_lock(&rq->lock);
1662	}
1663
1664	if (!(p->state & TASK_NORMAL))
1665		goto out;
1666
1667	if (!p->on_rq)
1668		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
1669
1670	ttwu_do_wakeup(rq, p, 0);
1671	ttwu_stat(p, smp_processor_id(), 0);
1672out:
1673	raw_spin_unlock(&p->pi_lock);
1674}
1675
1676/**
1677 * wake_up_process - Wake up a specific process
1678 * @p: The process to be woken up.
1679 *
1680 * Attempt to wake up the nominated process and move it to the set of runnable
1681 * processes.  Returns 1 if the process was woken up, 0 if it was already
1682 * running.
1683 *
1684 * It may be assumed that this function implies a write memory barrier before
1685 * changing the task state if and only if any tasks are woken up.
1686 */
1687int wake_up_process(struct task_struct *p)
1688{
1689	return try_to_wake_up(p, TASK_ALL, 0);
1690}
1691EXPORT_SYMBOL(wake_up_process);
1692
1693int wake_up_state(struct task_struct *p, unsigned int state)
1694{
1695	return try_to_wake_up(p, state, 0);
1696}
1697
1698/*
1699 * Perform scheduler related setup for a newly forked process p.
1700 * p is forked by current.
1701 *
1702 * __sched_fork() is basic setup used by init_idle() too:
1703 */
1704static void __sched_fork(struct task_struct *p)
1705{
1706	p->on_rq			= 0;
1707
1708	p->se.on_rq			= 0;
1709	p->se.exec_start		= 0;
1710	p->se.sum_exec_runtime		= 0;
1711	p->se.prev_sum_exec_runtime	= 0;
1712	p->se.nr_migrations		= 0;
1713	p->se.vruntime			= 0;
1714	INIT_LIST_HEAD(&p->se.group_node);
1715
1716#ifdef CONFIG_SCHEDSTATS
1717	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
1718#endif
1719
1720	INIT_LIST_HEAD(&p->rt.run_list);
1721
1722#ifdef CONFIG_PREEMPT_NOTIFIERS
1723	INIT_HLIST_HEAD(&p->preempt_notifiers);
1724#endif
1725}
1726
1727/*
1728 * fork()/clone()-time setup:
1729 */
1730void sched_fork(struct task_struct *p)
1731{
1732	unsigned long flags;
1733	int cpu = get_cpu();
1734
1735	__sched_fork(p);
1736	/*
1737	 * We mark the process as running here. This guarantees that
1738	 * nobody will actually run it, and a signal or other external
1739	 * event cannot wake it up and insert it on the runqueue either.
1740	 */
1741	p->state = TASK_RUNNING;
1742
1743	/*
1744	 * Make sure we do not leak PI boosting priority to the child.
1745	 */
1746	p->prio = current->normal_prio;
1747
1748	/*
1749	 * Revert to default priority/policy on fork if requested.
1750	 */
1751	if (unlikely(p->sched_reset_on_fork)) {
1752		if (task_has_rt_policy(p)) {
1753			p->policy = SCHED_NORMAL;
1754			p->static_prio = NICE_TO_PRIO(0);
1755			p->rt_priority = 0;
1756		} else if (PRIO_TO_NICE(p->static_prio) < 0)
1757			p->static_prio = NICE_TO_PRIO(0);
1758
1759		p->prio = p->normal_prio = __normal_prio(p);
1760		set_load_weight(p);
1761
1762		/*
1763		 * We don't need the reset flag anymore after the fork. It has
1764		 * fulfilled its duty:
1765		 */
1766		p->sched_reset_on_fork = 0;
1767	}
1768
1769	if (!rt_prio(p->prio))
1770		p->sched_class = &fair_sched_class;
1771
1772	if (p->sched_class->task_fork)
1773		p->sched_class->task_fork(p);
1774
1775	/*
1776	 * The child is not yet in the pid-hash so no cgroup attach races,
1777	 * and the cgroup is pinned to this child due to cgroup_fork()
1778	 * is ran before sched_fork().
1779	 *
1780	 * Silence PROVE_RCU.
1781	 */
1782	raw_spin_lock_irqsave(&p->pi_lock, flags);
1783	set_task_cpu(p, cpu);
1784	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1785
1786#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1787	if (likely(sched_info_on()))
1788		memset(&p->sched_info, 0, sizeof(p->sched_info));
1789#endif
1790#if defined(CONFIG_SMP)
1791	p->on_cpu = 0;
1792#endif
1793#ifdef CONFIG_PREEMPT_COUNT
1794	/* Want to start with kernel preemption disabled. */
1795	task_thread_info(p)->preempt_count = 1;
1796#endif
1797#ifdef CONFIG_SMP
1798	plist_node_init(&p->pushable_tasks, MAX_PRIO);
1799#endif
1800
1801	put_cpu();
1802}
1803
1804/*
1805 * wake_up_new_task - wake up a newly created task for the first time.
1806 *
1807 * This function will do some initial scheduler statistics housekeeping
1808 * that must be done for every newly created context, then puts the task
1809 * on the runqueue and wakes it.
1810 */
1811void wake_up_new_task(struct task_struct *p)
1812{
1813	unsigned long flags;
1814	struct rq *rq;
1815
1816	raw_spin_lock_irqsave(&p->pi_lock, flags);
1817#ifdef CONFIG_SMP
1818	/*
1819	 * Fork balancing, do it here and not earlier because:
1820	 *  - cpus_allowed can change in the fork path
1821	 *  - any previously selected cpu might disappear through hotplug
1822	 */
1823	set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
1824#endif
1825
1826	rq = __task_rq_lock(p);
1827	activate_task(rq, p, 0);
1828	p->on_rq = 1;
1829	trace_sched_wakeup_new(p, true);
1830	check_preempt_curr(rq, p, WF_FORK);
1831#ifdef CONFIG_SMP
1832	if (p->sched_class->task_woken)
1833		p->sched_class->task_woken(rq, p);
1834#endif
1835	task_rq_unlock(rq, p, &flags);
1836}
1837
1838#ifdef CONFIG_PREEMPT_NOTIFIERS
1839
1840/**
1841 * preempt_notifier_register - tell me when current is being preempted & rescheduled
1842 * @notifier: notifier struct to register
1843 */
1844void preempt_notifier_register(struct preempt_notifier *notifier)
1845{
1846	hlist_add_head(&notifier->link, &current->preempt_notifiers);
1847}
1848EXPORT_SYMBOL_GPL(preempt_notifier_register);
1849
1850/**
1851 * preempt_notifier_unregister - no longer interested in preemption notifications
1852 * @notifier: notifier struct to unregister
1853 *
1854 * This is safe to call from within a preemption notifier.
1855 */
1856void preempt_notifier_unregister(struct preempt_notifier *notifier)
1857{
1858	hlist_del(&notifier->link);
1859}
1860EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
1861
1862static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1863{
1864	struct preempt_notifier *notifier;
1865	struct hlist_node *node;
1866
1867	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1868		notifier->ops->sched_in(notifier, raw_smp_processor_id());
1869}
1870
1871static void
1872fire_sched_out_preempt_notifiers(struct task_struct *curr,
1873				 struct task_struct *next)
1874{
1875	struct preempt_notifier *notifier;
1876	struct hlist_node *node;
1877
1878	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1879		notifier->ops->sched_out(notifier, next);
1880}
1881
1882#else /* !CONFIG_PREEMPT_NOTIFIERS */
1883
1884static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1885{
1886}
1887
1888static void
1889fire_sched_out_preempt_notifiers(struct task_struct *curr,
1890				 struct task_struct *next)
1891{
1892}
1893
1894#endif /* CONFIG_PREEMPT_NOTIFIERS */
1895
1896/**
1897 * prepare_task_switch - prepare to switch tasks
1898 * @rq: the runqueue preparing to switch
1899 * @prev: the current task that is being switched out
1900 * @next: the task we are going to switch to.
1901 *
1902 * This is called with the rq lock held and interrupts off. It must
1903 * be paired with a subsequent finish_task_switch after the context
1904 * switch.
1905 *
1906 * prepare_task_switch sets up locking and calls architecture specific
1907 * hooks.
1908 */
1909static inline void
1910prepare_task_switch(struct rq *rq, struct task_struct *prev,
1911		    struct task_struct *next)
1912{
1913	trace_sched_switch(prev, next);
1914	sched_info_switch(prev, next);
1915	perf_event_task_sched_out(prev, next);
1916	fire_sched_out_preempt_notifiers(prev, next);
1917	prepare_lock_switch(rq, next);
1918	prepare_arch_switch(next);
1919}
1920
1921/**
1922 * finish_task_switch - clean up after a task-switch
1923 * @rq: runqueue associated with task-switch
1924 * @prev: the thread we just switched away from.
1925 *
1926 * finish_task_switch must be called after the context switch, paired
1927 * with a prepare_task_switch call before the context switch.
1928 * finish_task_switch will reconcile locking set up by prepare_task_switch,
1929 * and do any other architecture-specific cleanup actions.
1930 *
1931 * Note that we may have delayed dropping an mm in context_switch(). If
1932 * so, we finish that here outside of the runqueue lock. (Doing it
1933 * with the lock held can cause deadlocks; see schedule() for
1934 * details.)
1935 */
1936static void finish_task_switch(struct rq *rq, struct task_struct *prev)
1937	__releases(rq->lock)
1938{
1939	struct mm_struct *mm = rq->prev_mm;
1940	long prev_state;
1941
1942	rq->prev_mm = NULL;
1943
1944	/*
1945	 * A task struct has one reference for the use as "current".
1946	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
1947	 * schedule one last time. The schedule call will never return, and
1948	 * the scheduled task must drop that reference.
1949	 * The test for TASK_DEAD must occur while the runqueue locks are
1950	 * still held, otherwise prev could be scheduled on another cpu, die
1951	 * there before we look at prev->state, and then the reference would
1952	 * be dropped twice.
1953	 *		Manfred Spraul <manfred@colorfullife.com>
1954	 */
1955	prev_state = prev->state;
1956	finish_arch_switch(prev);
1957#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1958	local_irq_disable();
1959#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
1960	perf_event_task_sched_in(prev, current);
1961#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1962	local_irq_enable();
1963#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
1964	finish_lock_switch(rq, prev);
1965	finish_arch_post_lock_switch();
1966
1967	fire_sched_in_preempt_notifiers(current);
1968	if (mm)
1969		mmdrop(mm);
1970	if (unlikely(prev_state == TASK_DEAD)) {
1971		/*
1972		 * Remove function-return probe instances associated with this
1973		 * task and put them back on the free list.
1974		 */
1975		kprobe_flush_task(prev);
1976		put_task_struct(prev);
1977	}
1978}
1979
1980#ifdef CONFIG_SMP
1981
1982/* assumes rq->lock is held */
1983static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
1984{
1985	if (prev->sched_class->pre_schedule)
1986		prev->sched_class->pre_schedule(rq, prev);
1987}
1988
1989/* rq->lock is NOT held, but preemption is disabled */
1990static inline void post_schedule(struct rq *rq)
1991{
1992	if (rq->post_schedule) {
1993		unsigned long flags;
1994
1995		raw_spin_lock_irqsave(&rq->lock, flags);
1996		if (rq->curr->sched_class->post_schedule)
1997			rq->curr->sched_class->post_schedule(rq);
1998		raw_spin_unlock_irqrestore(&rq->lock, flags);
1999
2000		rq->post_schedule = 0;
2001	}
2002}
2003
2004#else
2005
2006static inline void pre_schedule(struct rq *rq, struct task_struct *p)
2007{
2008}
2009
2010static inline void post_schedule(struct rq *rq)
2011{
2012}
2013
2014#endif
2015
2016/**
2017 * schedule_tail - first thing a freshly forked thread must call.
2018 * @prev: the thread we just switched away from.
2019 */
2020asmlinkage void schedule_tail(struct task_struct *prev)
2021	__releases(rq->lock)
2022{
2023	struct rq *rq = this_rq();
2024
2025	finish_task_switch(rq, prev);
2026
2027	/*
2028	 * FIXME: do we need to worry about rq being invalidated by the
2029	 * task_switch?
2030	 */
2031	post_schedule(rq);
2032
2033#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2034	/* In this case, finish_task_switch does not reenable preemption */
2035	preempt_enable();
2036#endif
2037	if (current->set_child_tid)
2038		put_user(task_pid_vnr(current), current->set_child_tid);
2039}
2040
2041/*
2042 * context_switch - switch to the new MM and the new
2043 * thread's register state.
2044 */
2045static inline void
2046context_switch(struct rq *rq, struct task_struct *prev,
2047	       struct task_struct *next)
2048{
2049	struct mm_struct *mm, *oldmm;
2050
2051	prepare_task_switch(rq, prev, next);
2052
2053	mm = next->mm;
2054	oldmm = prev->active_mm;
2055	/*
2056	 * For paravirt, this is coupled with an exit in switch_to to
2057	 * combine the page table reload and the switch backend into
2058	 * one hypercall.
2059	 */
2060	arch_start_context_switch(prev);
2061
2062	if (!mm) {
2063		next->active_mm = oldmm;
2064		atomic_inc(&oldmm->mm_count);
2065		enter_lazy_tlb(oldmm, next);
2066	} else
2067		switch_mm(oldmm, mm, next);
2068
2069	if (!prev->mm) {
2070		prev->active_mm = NULL;
2071		rq->prev_mm = oldmm;
2072	}
2073	/*
2074	 * Since the runqueue lock will be released by the next
2075	 * task (which is an invalid locking op but in the case
2076	 * of the scheduler it's an obvious special-case), so we
2077	 * do an early lockdep release here:
2078	 */
2079#ifndef __ARCH_WANT_UNLOCKED_CTXSW
2080	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2081#endif
2082
2083	/* Here we just switch the register state and the stack. */
2084	switch_to(prev, next, prev);
2085
2086	barrier();
2087	/*
2088	 * this_rq must be evaluated again because prev may have moved
2089	 * CPUs since it called schedule(), thus the 'rq' on its stack
2090	 * frame will be invalid.
2091	 */
2092	finish_task_switch(this_rq(), prev);
2093}
2094
2095/*
2096 * nr_running, nr_uninterruptible and nr_context_switches:
2097 *
2098 * externally visible scheduler statistics: current number of runnable
2099 * threads, current number of uninterruptible-sleeping threads, total
2100 * number of context switches performed since bootup.
2101 */
2102unsigned long nr_running(void)
2103{
2104	unsigned long i, sum = 0;
2105
2106	for_each_online_cpu(i)
2107		sum += cpu_rq(i)->nr_running;
2108
2109	return sum;
2110}
2111
2112unsigned long nr_uninterruptible(void)
2113{
2114	unsigned long i, sum = 0;
2115
2116	for_each_possible_cpu(i)
2117		sum += cpu_rq(i)->nr_uninterruptible;
2118
2119	/*
2120	 * Since we read the counters lockless, it might be slightly
2121	 * inaccurate. Do not allow it to go below zero though:
2122	 */
2123	if (unlikely((long)sum < 0))
2124		sum = 0;
2125
2126	return sum;
2127}
2128
2129unsigned long long nr_context_switches(void)
2130{
2131	int i;
2132	unsigned long long sum = 0;
2133
2134	for_each_possible_cpu(i)
2135		sum += cpu_rq(i)->nr_switches;
2136
2137	return sum;
2138}
2139
2140unsigned long nr_iowait(void)
2141{
2142	unsigned long i, sum = 0;
2143
2144	for_each_possible_cpu(i)
2145		sum += atomic_read(&cpu_rq(i)->nr_iowait);
2146
2147	return sum;
2148}
2149
2150unsigned long nr_iowait_cpu(int cpu)
2151{
2152	struct rq *this = cpu_rq(cpu);
2153	return atomic_read(&this->nr_iowait);
2154}
2155
2156unsigned long this_cpu_load(void)
2157{
2158	struct rq *this = this_rq();
2159	return this->cpu_load[0];
2160}
2161
2162
2163/*
2164 * Global load-average calculations
2165 *
2166 * We take a distributed and async approach to calculating the global load-avg
2167 * in order to minimize overhead.
2168 *
2169 * The global load average is an exponentially decaying average of nr_running +
2170 * nr_uninterruptible.
2171 *
2172 * Once every LOAD_FREQ:
2173 *
2174 *   nr_active = 0;
2175 *   for_each_possible_cpu(cpu)
2176 *   	nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
2177 *
2178 *   avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
2179 *
2180 * Due to a number of reasons the above turns in the mess below:
2181 *
2182 *  - for_each_possible_cpu() is prohibitively expensive on machines with
2183 *    serious number of cpus, therefore we need to take a distributed approach
2184 *    to calculating nr_active.
2185 *
2186 *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
2187 *                      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
2188 *
2189 *    So assuming nr_active := 0 when we start out -- true per definition, we
2190 *    can simply take per-cpu deltas and fold those into a global accumulate
2191 *    to obtain the same result. See calc_load_fold_active().
2192 *
2193 *    Furthermore, in order to avoid synchronizing all per-cpu delta folding
2194 *    across the machine, we assume 10 ticks is sufficient time for every
2195 *    cpu to have completed this task.
2196 *
2197 *    This places an upper-bound on the IRQ-off latency of the machine. Then
2198 *    again, being late doesn't loose the delta, just wrecks the sample.
2199 *
2200 *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
2201 *    this would add another cross-cpu cacheline miss and atomic operation
2202 *    to the wakeup path. Instead we increment on whatever cpu the task ran
2203 *    when it went into uninterruptible state and decrement on whatever cpu
2204 *    did the wakeup. This means that only the sum of nr_uninterruptible over
2205 *    all cpus yields the correct result.
2206 *
2207 *  This covers the NO_HZ=n code, for extra head-aches, see the comment below.
2208 */
2209
2210/* Variables and functions for calc_load */
2211static atomic_long_t calc_load_tasks;
2212static unsigned long calc_load_update;
2213unsigned long avenrun[3];
2214EXPORT_SYMBOL(avenrun); /* should be removed */
2215
2216/**
2217 * get_avenrun - get the load average array
2218 * @loads:	pointer to dest load array
2219 * @offset:	offset to add
2220 * @shift:	shift count to shift the result left
2221 *
2222 * These values are estimates at best, so no need for locking.
2223 */
2224void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
2225{
2226	loads[0] = (avenrun[0] + offset) << shift;
2227	loads[1] = (avenrun[1] + offset) << shift;
2228	loads[2] = (avenrun[2] + offset) << shift;
2229}
2230
2231static long calc_load_fold_active(struct rq *this_rq)
2232{
2233	long nr_active, delta = 0;
2234
2235	nr_active = this_rq->nr_running;
2236	nr_active += (long) this_rq->nr_uninterruptible;
2237
2238	if (nr_active != this_rq->calc_load_active) {
2239		delta = nr_active - this_rq->calc_load_active;
2240		this_rq->calc_load_active = nr_active;
2241	}
2242
2243	return delta;
2244}
2245
2246/*
2247 * a1 = a0 * e + a * (1 - e)
2248 */
2249static unsigned long
2250calc_load(unsigned long load, unsigned long exp, unsigned long active)
2251{
2252	load *= exp;
2253	load += active * (FIXED_1 - exp);
2254	load += 1UL << (FSHIFT - 1);
2255	return load >> FSHIFT;
2256}
2257
2258#ifdef CONFIG_NO_HZ
2259/*
2260 * Handle NO_HZ for the global load-average.
2261 *
2262 * Since the above described distributed algorithm to compute the global
2263 * load-average relies on per-cpu sampling from the tick, it is affected by
2264 * NO_HZ.
2265 *
2266 * The basic idea is to fold the nr_active delta into a global idle-delta upon
2267 * entering NO_HZ state such that we can include this as an 'extra' cpu delta
2268 * when we read the global state.
2269 *
2270 * Obviously reality has to ruin such a delightfully simple scheme:
2271 *
2272 *  - When we go NO_HZ idle during the window, we can negate our sample
2273 *    contribution, causing under-accounting.
2274 *
2275 *    We avoid this by keeping two idle-delta counters and flipping them
2276 *    when the window starts, thus separating old and new NO_HZ load.
2277 *
2278 *    The only trick is the slight shift in index flip for read vs write.
2279 *
2280 *        0s            5s            10s           15s
2281 *          +10           +10           +10           +10
2282 *        |-|-----------|-|-----------|-|-----------|-|
2283 *    r:0 0 1           1 0           0 1           1 0
2284 *    w:0 1 1           0 0           1 1           0 0
2285 *
2286 *    This ensures we'll fold the old idle contribution in this window while
2287 *    accumlating the new one.
2288 *
2289 *  - When we wake up from NO_HZ idle during the window, we push up our
2290 *    contribution, since we effectively move our sample point to a known
2291 *    busy state.
2292 *
2293 *    This is solved by pushing the window forward, and thus skipping the
2294 *    sample, for this cpu (effectively using the idle-delta for this cpu which
2295 *    was in effect at the time the window opened). This also solves the issue
2296 *    of having to deal with a cpu having been in NOHZ idle for multiple
2297 *    LOAD_FREQ intervals.
2298 *
2299 * When making the ILB scale, we should try to pull this in as well.
2300 */
2301static atomic_long_t calc_load_idle[2];
2302static int calc_load_idx;
2303
2304static inline int calc_load_write_idx(void)
2305{
2306	int idx = calc_load_idx;
2307
2308	/*
2309	 * See calc_global_nohz(), if we observe the new index, we also
2310	 * need to observe the new update time.
2311	 */
2312	smp_rmb();
2313
2314	/*
2315	 * If the folding window started, make sure we start writing in the
2316	 * next idle-delta.
2317	 */
2318	if (!time_before(jiffies, calc_load_update))
2319		idx++;
2320
2321	return idx & 1;
2322}
2323
2324static inline int calc_load_read_idx(void)
2325{
2326	return calc_load_idx & 1;
2327}
2328
2329void calc_load_enter_idle(void)
2330{
2331	struct rq *this_rq = this_rq();
2332	long delta;
2333
2334	/*
2335	 * We're going into NOHZ mode, if there's any pending delta, fold it
2336	 * into the pending idle delta.
2337	 */
2338	delta = calc_load_fold_active(this_rq);
2339	if (delta) {
2340		int idx = calc_load_write_idx();
2341		atomic_long_add(delta, &calc_load_idle[idx]);
2342	}
2343}
2344
2345void calc_load_exit_idle(void)
2346{
2347	struct rq *this_rq = this_rq();
2348
2349	/*
2350	 * If we're still before the sample window, we're done.
2351	 */
2352	if (time_before(jiffies, this_rq->calc_load_update))
2353		return;
2354
2355	/*
2356	 * We woke inside or after the sample window, this means we're already
2357	 * accounted through the nohz accounting, so skip the entire deal and
2358	 * sync up for the next window.
2359	 */
2360	this_rq->calc_load_update = calc_load_update;
2361	if (time_before(jiffies, this_rq->calc_load_update + 10))
2362		this_rq->calc_load_update += LOAD_FREQ;
2363}
2364
2365static long calc_load_fold_idle(void)
2366{
2367	int idx = calc_load_read_idx();
2368	long delta = 0;
2369
2370	if (atomic_long_read(&calc_load_idle[idx]))
2371		delta = atomic_long_xchg(&calc_load_idle[idx], 0);
2372
2373	return delta;
2374}
2375
2376/**
2377 * fixed_power_int - compute: x^n, in O(log n) time
2378 *
2379 * @x:         base of the power
2380 * @frac_bits: fractional bits of @x
2381 * @n:         power to raise @x to.
2382 *
2383 * By exploiting the relation between the definition of the natural power
2384 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
2385 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
2386 * (where: n_i \elem {0, 1}, the binary vector representing n),
2387 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
2388 * of course trivially computable in O(log_2 n), the length of our binary
2389 * vector.
2390 */
2391static unsigned long
2392fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
2393{
2394	unsigned long result = 1UL << frac_bits;
2395
2396	if (n) for (;;) {
2397		if (n & 1) {
2398			result *= x;
2399			result += 1UL << (frac_bits - 1);
2400			result >>= frac_bits;
2401		}
2402		n >>= 1;
2403		if (!n)
2404			break;
2405		x *= x;
2406		x += 1UL << (frac_bits - 1);
2407		x >>= frac_bits;
2408	}
2409
2410	return result;
2411}
2412
2413/*
2414 * a1 = a0 * e + a * (1 - e)
2415 *
2416 * a2 = a1 * e + a * (1 - e)
2417 *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
2418 *    = a0 * e^2 + a * (1 - e) * (1 + e)
2419 *
2420 * a3 = a2 * e + a * (1 - e)
2421 *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
2422 *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
2423 *
2424 *  ...
2425 *
2426 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
2427 *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
2428 *    = a0 * e^n + a * (1 - e^n)
2429 *
2430 * [1] application of the geometric series:
2431 *
2432 *              n         1 - x^(n+1)
2433 *     S_n := \Sum x^i = -------------
2434 *             i=0          1 - x
2435 */
2436static unsigned long
2437calc_load_n(unsigned long load, unsigned long exp,
2438	    unsigned long active, unsigned int n)
2439{
2440
2441	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
2442}
2443
2444/*
2445 * NO_HZ can leave us missing all per-cpu ticks calling
2446 * calc_load_account_active(), but since an idle CPU folds its delta into
2447 * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
2448 * in the pending idle delta if our idle period crossed a load cycle boundary.
2449 *
2450 * Once we've updated the global active value, we need to apply the exponential
2451 * weights adjusted to the number of cycles missed.
2452 */
2453static void calc_global_nohz(void)
2454{
2455	long delta, active, n;
2456
2457	if (!time_before(jiffies, calc_load_update + 10)) {
2458		/*
2459		 * Catch-up, fold however many we are behind still
2460		 */
2461		delta = jiffies - calc_load_update - 10;
2462		n = 1 + (delta / LOAD_FREQ);
2463
2464		active = atomic_long_read(&calc_load_tasks);
2465		active = active > 0 ? active * FIXED_1 : 0;
2466
2467		avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
2468		avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
2469		avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
2470
2471		calc_load_update += n * LOAD_FREQ;
2472	}
2473
2474	/*
2475	 * Flip the idle index...
2476	 *
2477	 * Make sure we first write the new time then flip the index, so that
2478	 * calc_load_write_idx() will see the new time when it reads the new
2479	 * index, this avoids a double flip messing things up.
2480	 */
2481	smp_wmb();
2482	calc_load_idx++;
2483}
2484#else /* !CONFIG_NO_HZ */
2485
2486static inline long calc_load_fold_idle(void) { return 0; }
2487static inline void calc_global_nohz(void) { }
2488
2489#endif /* CONFIG_NO_HZ */
2490
2491/*
2492 * calc_load - update the avenrun load estimates 10 ticks after the
2493 * CPUs have updated calc_load_tasks.
2494 */
2495void calc_global_load(unsigned long ticks)
2496{
2497	long active, delta;
2498
2499	if (time_before(jiffies, calc_load_update + 10))
2500		return;
2501
2502	/*
2503	 * Fold the 'old' idle-delta to include all NO_HZ cpus.
2504	 */
2505	delta = calc_load_fold_idle();
2506	if (delta)
2507		atomic_long_add(delta, &calc_load_tasks);
2508
2509	active = atomic_long_read(&calc_load_tasks);
2510	active = active > 0 ? active * FIXED_1 : 0;
2511
2512	avenrun[0] = calc_load(avenrun[0], EXP_1, active);
2513	avenrun[1] = calc_load(avenrun[1], EXP_5, active);
2514	avenrun[2] = calc_load(avenrun[2], EXP_15, active);
2515
2516	calc_load_update += LOAD_FREQ;
2517
2518	/*
2519	 * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
2520	 */
2521	calc_global_nohz();
2522}
2523
2524/*
2525 * Called from update_cpu_load() to periodically update this CPU's
2526 * active count.
2527 */
2528static void calc_load_account_active(struct rq *this_rq)
2529{
2530	long delta;
2531
2532	if (time_before(jiffies, this_rq->calc_load_update))
2533		return;
2534
2535	delta  = calc_load_fold_active(this_rq);
2536	if (delta)
2537		atomic_long_add(delta, &calc_load_tasks);
2538
2539	this_rq->calc_load_update += LOAD_FREQ;
2540}
2541
2542/*
2543 * End of global load-average stuff
2544 */
2545
2546/*
2547 * The exact cpuload at various idx values, calculated at every tick would be
2548 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
2549 *
2550 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
2551 * on nth tick when cpu may be busy, then we have:
2552 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
2553 * load = (2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
2554 *
2555 * decay_load_missed() below does efficient calculation of
2556 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
2557 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
2558 *
2559 * The calculation is approximated on a 128 point scale.
2560 * degrade_zero_ticks is the number of ticks after which load at any
2561 * particular idx is approximated to be zero.
2562 * degrade_factor is a precomputed table, a row for each load idx.
2563 * Each column corresponds to degradation factor for a power of two ticks,
2564 * based on 128 point scale.
2565 * Example:
2566 * row 2, col 3 (=12) says that the degradation at load idx 2 after
2567 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
2568 *
2569 * With this power of 2 load factors, we can degrade the load n times
2570 * by looking at 1 bits in n and doing as many mult/shift instead of
2571 * n mult/shifts needed by the exact degradation.
2572 */
2573#define DEGRADE_SHIFT		7
2574static const unsigned char
2575		degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
2576static const unsigned char
2577		degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
2578					{0, 0, 0, 0, 0, 0, 0, 0},
2579					{64, 32, 8, 0, 0, 0, 0, 0},
2580					{96, 72, 40, 12, 1, 0, 0},
2581					{112, 98, 75, 43, 15, 1, 0},
2582					{120, 112, 98, 76, 45, 16, 2} };
2583
2584/*
2585 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
2586 * would be when CPU is idle and so we just decay the old load without
2587 * adding any new load.
2588 */
2589static unsigned long
2590decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
2591{
2592	int j = 0;
2593
2594	if (!missed_updates)
2595		return load;
2596
2597	if (missed_updates >= degrade_zero_ticks[idx])
2598		return 0;
2599
2600	if (idx == 1)
2601		return load >> missed_updates;
2602
2603	while (missed_updates) {
2604		if (missed_updates % 2)
2605			load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
2606
2607		missed_updates >>= 1;
2608		j++;
2609	}
2610	return load;
2611}
2612
2613/*
2614 * Update rq->cpu_load[] statistics. This function is usually called every
2615 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
2616 * every tick. We fix it up based on jiffies.
2617 */
2618static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
2619			      unsigned long pending_updates)
2620{
2621	int i, scale;
2622
2623	this_rq->nr_load_updates++;
2624
2625	/* Update our load: */
2626	this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
2627	for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
2628		unsigned long old_load, new_load;
2629
2630		/* scale is effectively 1 << i now, and >> i divides by scale */
2631
2632		old_load = this_rq->cpu_load[i];
2633		old_load = decay_load_missed(old_load, pending_updates - 1, i);
2634		new_load = this_load;
2635		/*
2636		 * Round up the averaging division if load is increasing. This
2637		 * prevents us from getting stuck on 9 if the load is 10, for
2638		 * example.
2639		 */
2640		if (new_load > old_load)
2641			new_load += scale - 1;
2642
2643		this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
2644	}
2645
2646	sched_avg_update(this_rq);
2647}
2648
2649#ifdef CONFIG_NO_HZ
2650/*
2651 * There is no sane way to deal with nohz on smp when using jiffies because the
2652 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
2653 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
2654 *
2655 * Therefore we cannot use the delta approach from the regular tick since that
2656 * would seriously skew the load calculation. However we'll make do for those
2657 * updates happening while idle (nohz_idle_balance) or coming out of idle
2658 * (tick_nohz_idle_exit).
2659 *
2660 * This means we might still be one tick off for nohz periods.
2661 */
2662
2663/*
2664 * Called from nohz_idle_balance() to update the load ratings before doing the
2665 * idle balance.
2666 */
2667void update_idle_cpu_load(struct rq *this_rq)
2668{
2669	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
2670	unsigned long load = this_rq->load.weight;
2671	unsigned long pending_updates;
2672
2673	/*
2674	 * bail if there's load or we're actually up-to-date.
2675	 */
2676	if (load || curr_jiffies == this_rq->last_load_update_tick)
2677		return;
2678
2679	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
2680	this_rq->last_load_update_tick = curr_jiffies;
2681
2682	__update_cpu_load(this_rq, load, pending_updates);
2683}
2684
2685/*
2686 * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
2687 */
2688void update_cpu_load_nohz(void)
2689{
2690	struct rq *this_rq = this_rq();
2691	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
2692	unsigned long pending_updates;
2693
2694	if (curr_jiffies == this_rq->last_load_update_tick)
2695		return;
2696
2697	raw_spin_lock(&this_rq->lock);
2698	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
2699	if (pending_updates) {
2700		this_rq->last_load_update_tick = curr_jiffies;
2701		/*
2702		 * We were idle, this means load 0, the current load might be
2703		 * !0 due to remote wakeups and the sort.
2704		 */
2705		__update_cpu_load(this_rq, 0, pending_updates);
2706	}
2707	raw_spin_unlock(&this_rq->lock);
2708}
2709#endif /* CONFIG_NO_HZ */
2710
2711/*
2712 * Called from scheduler_tick()
2713 */
2714static void update_cpu_load_active(struct rq *this_rq)
2715{
2716	/*
2717	 * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
2718	 */
2719	this_rq->last_load_update_tick = jiffies;
2720	__update_cpu_load(this_rq, this_rq->load.weight, 1);
2721
2722	calc_load_account_active(this_rq);
2723}
2724
2725#ifdef CONFIG_SMP
2726
2727/*
2728 * sched_exec - execve() is a valuable balancing opportunity, because at
2729 * this point the task has the smallest effective memory and cache footprint.
2730 */
2731void sched_exec(void)
2732{
2733	struct task_struct *p = current;
2734	unsigned long flags;
2735	int dest_cpu;
2736
2737	raw_spin_lock_irqsave(&p->pi_lock, flags);
2738	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
2739	if (dest_cpu == smp_processor_id())
2740		goto unlock;
2741
2742	if (likely(cpu_active(dest_cpu))) {
2743		struct migration_arg arg = { p, dest_cpu };
2744
2745		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2746		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
2747		return;
2748	}
2749unlock:
2750	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2751}
2752
2753#endif
2754
2755DEFINE_PER_CPU(struct kernel_stat, kstat);
2756DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
2757
2758EXPORT_PER_CPU_SYMBOL(kstat);
2759EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
2760
2761/*
2762 * Return any ns on the sched_clock that have not yet been accounted in
2763 * @p in case that task is currently running.
2764 *
2765 * Called with task_rq_lock() held on @rq.
2766 */
2767static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
2768{
2769	u64 ns = 0;
2770
2771	if (task_current(rq, p)) {
2772		update_rq_clock(rq);
2773		ns = rq->clock_task - p->se.exec_start;
2774		if ((s64)ns < 0)
2775			ns = 0;
2776	}
2777
2778	return ns;
2779}
2780
2781unsigned long long task_delta_exec(struct task_struct *p)
2782{
2783	unsigned long flags;
2784	struct rq *rq;
2785	u64 ns = 0;
2786
2787	rq = task_rq_lock(p, &flags);
2788	ns = do_task_delta_exec(p, rq);
2789	task_rq_unlock(rq, p, &flags);
2790
2791	return ns;
2792}
2793
2794/*
2795 * Return accounted runtime for the task.
2796 * In case the task is currently running, return the runtime plus current's
2797 * pending runtime that have not been accounted yet.
2798 */
2799unsigned long long task_sched_runtime(struct task_struct *p)
2800{
2801	unsigned long flags;
2802	struct rq *rq;
2803	u64 ns = 0;
2804
2805	rq = task_rq_lock(p, &flags);
2806	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
2807	task_rq_unlock(rq, p, &flags);
2808
2809	return ns;
2810}
2811
2812#ifdef CONFIG_CGROUP_CPUACCT
2813struct cgroup_subsys cpuacct_subsys;
2814struct cpuacct root_cpuacct;
2815#endif
2816
2817static inline void task_group_account_field(struct task_struct *p, int index,
2818					    u64 tmp)
2819{
2820#ifdef CONFIG_CGROUP_CPUACCT
2821	struct kernel_cpustat *kcpustat;
2822	struct cpuacct *ca;
2823#endif
2824	/*
2825	 * Since all updates are sure to touch the root cgroup, we
2826	 * get ourselves ahead and touch it first. If the root cgroup
2827	 * is the only cgroup, then nothing else should be necessary.
2828	 *
2829	 */
2830	__get_cpu_var(kernel_cpustat).cpustat[index] += tmp;
2831
2832#ifdef CONFIG_CGROUP_CPUACCT
2833	if (unlikely(!cpuacct_subsys.active))
2834		return;
2835
2836	rcu_read_lock();
2837	ca = task_ca(p);
2838	while (ca && (ca != &root_cpuacct)) {
2839		kcpustat = this_cpu_ptr(ca->cpustat);
2840		kcpustat->cpustat[index] += tmp;
2841		ca = parent_ca(ca);
2842	}
2843	rcu_read_unlock();
2844#endif
2845}
2846
2847
2848/*
2849 * Account user cpu time to a process.
2850 * @p: the process that the cpu time gets accounted to
2851 * @cputime: the cpu time spent in user space since the last update
2852 * @cputime_scaled: cputime scaled by cpu frequency
2853 */
2854void account_user_time(struct task_struct *p, cputime_t cputime,
2855		       cputime_t cputime_scaled)
2856{
2857	int index;
2858
2859	/* Add user time to process. */
2860	p->utime += cputime;
2861	p->utimescaled += cputime_scaled;
2862	account_group_user_time(p, cputime);
2863
2864	index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
2865
2866	/* Add user time to cpustat. */
2867	task_group_account_field(p, index, (__force u64) cputime);
2868
2869	/* Account for user time used */
2870	acct_update_integrals(p);
2871}
2872
2873/*
2874 * Account guest cpu time to a process.
2875 * @p: the process that the cpu time gets accounted to
2876 * @cputime: the cpu time spent in virtual machine since the last update
2877 * @cputime_scaled: cputime scaled by cpu frequency
2878 */
2879static void account_guest_time(struct task_struct *p, cputime_t cputime,
2880			       cputime_t cputime_scaled)
2881{
2882	u64 *cpustat = kcpustat_this_cpu->cpustat;
2883
2884	/* Add guest time to process. */
2885	p->utime += cputime;
2886	p->utimescaled += cputime_scaled;
2887	account_group_user_time(p, cputime);
2888	p->gtime += cputime;
2889
2890	/* Add guest time to cpustat. */
2891	if (TASK_NICE(p) > 0) {
2892		cpustat[CPUTIME_NICE] += (__force u64) cputime;
2893		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
2894	} else {
2895		cpustat[CPUTIME_USER] += (__force u64) cputime;
2896		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
2897	}
2898}
2899
2900/*
2901 * Account system cpu time to a process and desired cpustat field
2902 * @p: the process that the cpu time gets accounted to
2903 * @cputime: the cpu time spent in kernel space since the last update
2904 * @cputime_scaled: cputime scaled by cpu frequency
2905 * @target_cputime64: pointer to cpustat field that has to be updated
2906 */
2907static inline
2908void __account_system_time(struct task_struct *p, cputime_t cputime,
2909			cputime_t cputime_scaled, int index)
2910{
2911	/* Add system time to process. */
2912	p->stime += cputime;
2913	p->stimescaled += cputime_scaled;
2914	account_group_system_time(p, cputime);
2915
2916	/* Add system time to cpustat. */
2917	task_group_account_field(p, index, (__force u64) cputime);
2918
2919	/* Account for system time used */
2920	acct_update_integrals(p);
2921}
2922
2923/*
2924 * Account system cpu time to a process.
2925 * @p: the process that the cpu time gets accounted to
2926 * @hardirq_offset: the offset to subtract from hardirq_count()
2927 * @cputime: the cpu time spent in kernel space since the last update
2928 * @cputime_scaled: cputime scaled by cpu frequency
2929 */
2930void account_system_time(struct task_struct *p, int hardirq_offset,
2931			 cputime_t cputime, cputime_t cputime_scaled)
2932{
2933	int index;
2934
2935	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
2936		account_guest_time(p, cputime, cputime_scaled);
2937		return;
2938	}
2939
2940	if (hardirq_count() - hardirq_offset)
2941		index = CPUTIME_IRQ;
2942	else if (in_serving_softirq())
2943		index = CPUTIME_SOFTIRQ;
2944	else
2945		index = CPUTIME_SYSTEM;
2946
2947	__account_system_time(p, cputime, cputime_scaled, index);
2948}
2949
2950/*
2951 * Account for involuntary wait time.
2952 * @cputime: the cpu time spent in involuntary wait
2953 */
2954void account_steal_time(cputime_t cputime)
2955{
2956	u64 *cpustat = kcpustat_this_cpu->cpustat;
2957
2958	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
2959}
2960
2961/*
2962 * Account for idle time.
2963 * @cputime: the cpu time spent in idle wait
2964 */
2965void account_idle_time(cputime_t cputime)
2966{
2967	u64 *cpustat = kcpustat_this_cpu->cpustat;
2968	struct rq *rq = this_rq();
2969
2970	if (atomic_read(&rq->nr_iowait) > 0)
2971		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
2972	else
2973		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
2974}
2975
2976static __always_inline bool steal_account_process_tick(void)
2977{
2978#ifdef CONFIG_PARAVIRT
2979	if (static_key_false(&paravirt_steal_enabled)) {
2980		u64 steal, st = 0;
2981
2982		steal = paravirt_steal_clock(smp_processor_id());
2983		steal -= this_rq()->prev_steal_time;
2984
2985		st = steal_ticks(steal);
2986		this_rq()->prev_steal_time += st * TICK_NSEC;
2987
2988		account_steal_time(st);
2989		return st;
2990	}
2991#endif
2992	return false;
2993}
2994
2995#ifndef CONFIG_VIRT_CPU_ACCOUNTING
2996
2997#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2998/*
2999 * Account a tick to a process and cpustat
3000 * @p: the process that the cpu time gets accounted to
3001 * @user_tick: is the tick from userspace
3002 * @rq: the pointer to rq
3003 *
3004 * Tick demultiplexing follows the order
3005 * - pending hardirq update
3006 * - pending softirq update
3007 * - user_time
3008 * - idle_time
3009 * - system time
3010 *   - check for guest_time
3011 *   - else account as system_time
3012 *
3013 * Check for hardirq is done both for system and user time as there is
3014 * no timer going off while we are on hardirq and hence we may never get an
3015 * opportunity to update it solely in system time.
3016 * p->stime and friends are only updated on system time and not on irq
3017 * softirq as those do not count in task exec_runtime any more.
3018 */
3019static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
3020						struct rq *rq)
3021{
3022	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
3023	u64 *cpustat = kcpustat_this_cpu->cpustat;
3024
3025	if (steal_account_process_tick())
3026		return;
3027
3028	if (irqtime_account_hi_update()) {
3029		cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
3030	} else if (irqtime_account_si_update()) {
3031		cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
3032	} else if (this_cpu_ksoftirqd() == p) {
3033		/*
3034		 * ksoftirqd time do not get accounted in cpu_softirq_time.
3035		 * So, we have to handle it separately here.
3036		 * Also, p->stime needs to be updated for ksoftirqd.
3037		 */
3038		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3039					CPUTIME_SOFTIRQ);
3040	} else if (user_tick) {
3041		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
3042	} else if (p == rq->idle) {
3043		account_idle_time(cputime_one_jiffy);
3044	} else if (p->flags & PF_VCPU) { /* System time or guest time */
3045		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
3046	} else {
3047		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3048					CPUTIME_SYSTEM);
3049	}
3050}
3051
3052static void irqtime_account_idle_ticks(int ticks)
3053{
3054	int i;
3055	struct rq *rq = this_rq();
3056
3057	for (i = 0; i < ticks; i++)
3058		irqtime_account_process_tick(current, 0, rq);
3059}
3060#else /* CONFIG_IRQ_TIME_ACCOUNTING */
3061static void irqtime_account_idle_ticks(int ticks) {}
3062static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
3063						struct rq *rq) {}
3064#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
3065
3066/*
3067 * Account a single tick of cpu time.
3068 * @p: the process that the cpu time gets accounted to
3069 * @user_tick: indicates if the tick is a user or a system tick
3070 */
3071void account_process_tick(struct task_struct *p, int user_tick)
3072{
3073	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
3074	struct rq *rq = this_rq();
3075
3076	if (sched_clock_irqtime) {
3077		irqtime_account_process_tick(p, user_tick, rq);
3078		return;
3079	}
3080
3081	if (steal_account_process_tick())
3082		return;
3083
3084	if (user_tick)
3085		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
3086	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
3087		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
3088				    one_jiffy_scaled);
3089	else
3090		account_idle_time(cputime_one_jiffy);
3091}
3092
3093/*
3094 * Account multiple ticks of steal time.
3095 * @p: the process from which the cpu time has been stolen
3096 * @ticks: number of stolen ticks
3097 */
3098void account_steal_ticks(unsigned long ticks)
3099{
3100	account_steal_time(jiffies_to_cputime(ticks));
3101}
3102
3103/*
3104 * Account multiple ticks of idle time.
3105 * @ticks: number of stolen ticks
3106 */
3107void account_idle_ticks(unsigned long ticks)
3108{
3109
3110	if (sched_clock_irqtime) {
3111		irqtime_account_idle_ticks(ticks);
3112		return;
3113	}
3114
3115	account_idle_time(jiffies_to_cputime(ticks));
3116}
3117
3118#endif
3119
3120/*
3121 * Use precise platform statistics if available:
3122 */
3123#ifdef CONFIG_VIRT_CPU_ACCOUNTING
3124void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3125{
3126	*ut = p->utime;
3127	*st = p->stime;
3128}
3129
3130void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3131{
3132	struct task_cputime cputime;
3133
3134	thread_group_cputime(p, &cputime);
3135
3136	*ut = cputime.utime;
3137	*st = cputime.stime;
3138}
3139#else
3140
3141#ifndef nsecs_to_cputime
3142# define nsecs_to_cputime(__nsecs)	nsecs_to_jiffies(__nsecs)
3143#endif
3144
3145static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
3146{
3147	u64 temp = (__force u64) rtime;
3148
3149	temp *= (__force u64) utime;
3150
3151	if (sizeof(cputime_t) == 4)
3152		temp = div_u64(temp, (__force u32) total);
3153	else
3154		temp = div64_u64(temp, (__force u64) total);
3155
3156	return (__force cputime_t) temp;
3157}
3158
3159void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3160{
3161	cputime_t rtime, utime = p->utime, total = utime + p->stime;
3162
3163	/*
3164	 * Use CFS's precise accounting:
3165	 */
3166	rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
3167
3168	if (total)
3169		utime = scale_utime(utime, rtime, total);
3170	else
3171		utime = rtime;
3172
3173	/*
3174	 * Compare with previous values, to keep monotonicity:
3175	 */
3176	p->prev_utime = max(p->prev_utime, utime);
3177	p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
3178
3179	*ut = p->prev_utime;
3180	*st = p->prev_stime;
3181}
3182
3183/*
3184 * Must be called with siglock held.
3185 */
3186void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3187{
3188	struct signal_struct *sig = p->signal;
3189	struct task_cputime cputime;
3190	cputime_t rtime, utime, total;
3191
3192	thread_group_cputime(p, &cputime);
3193
3194	total = cputime.utime + cputime.stime;
3195	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
3196
3197	if (total)
3198		utime = scale_utime(cputime.utime, rtime, total);
3199	else
3200		utime = rtime;
3201
3202	sig->prev_utime = max(sig->prev_utime, utime);
3203	sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime);
3204
3205	*ut = sig->prev_utime;
3206	*st = sig->prev_stime;
3207}
3208#endif
3209
3210/*
3211 * This function gets called by the timer code, with HZ frequency.
3212 * We call it with interrupts disabled.
3213 */
3214void scheduler_tick(void)
3215{
3216	int cpu = smp_processor_id();
3217	struct rq *rq = cpu_rq(cpu);
3218	struct task_struct *curr = rq->curr;
3219
3220	sched_clock_tick();
3221
3222	raw_spin_lock(&rq->lock);
3223	update_rq_clock(rq);
3224	update_cpu_load_active(rq);
3225	curr->sched_class->task_tick(rq, curr, 0);
3226	raw_spin_unlock(&rq->lock);
3227
3228	perf_event_task_tick();
3229
3230#ifdef CONFIG_SMP
3231	rq->idle_balance = idle_cpu(cpu);
3232	trigger_load_balance(rq, cpu);
3233#endif
3234}
3235
3236notrace unsigned long get_parent_ip(unsigned long addr)
3237{
3238	if (in_lock_functions(addr)) {
3239		addr = CALLER_ADDR2;
3240		if (in_lock_functions(addr))
3241			addr = CALLER_ADDR3;
3242	}
3243	return addr;
3244}
3245
3246#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
3247				defined(CONFIG_PREEMPT_TRACER))
3248
3249void __kprobes add_preempt_count(int val)
3250{
3251#ifdef CONFIG_DEBUG_PREEMPT
3252	/*
3253	 * Underflow?
3254	 */
3255	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3256		return;
3257#endif
3258	preempt_count() += val;
3259#ifdef CONFIG_DEBUG_PREEMPT
3260	/*
3261	 * Spinlock count overflowing soon?
3262	 */
3263	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3264				PREEMPT_MASK - 10);
3265#endif
3266	if (preempt_count() == val)
3267		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
3268}
3269EXPORT_SYMBOL(add_preempt_count);
3270
3271void __kprobes sub_preempt_count(int val)
3272{
3273#ifdef CONFIG_DEBUG_PREEMPT
3274	/*
3275	 * Underflow?
3276	 */
3277	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
3278		return;
3279	/*
3280	 * Is the spinlock portion underflowing?
3281	 */
3282	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3283			!(preempt_count() & PREEMPT_MASK)))
3284		return;
3285#endif
3286
3287	if (preempt_count() == val)
3288		trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
3289	preempt_count() -= val;
3290}
3291EXPORT_SYMBOL(sub_preempt_count);
3292
3293#endif
3294
3295/*
3296 * Print scheduling while atomic bug:
3297 */
3298static noinline void __schedule_bug(struct task_struct *prev)
3299{
3300	if (oops_in_progress)
3301		return;
3302
3303	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3304		prev->comm, prev->pid, preempt_count());
3305
3306	debug_show_held_locks(prev);
3307	print_modules();
3308	if (irqs_disabled())
3309		print_irqtrace_events(prev);
3310	dump_stack();
3311	add_taint(TAINT_WARN);
3312}
3313
3314/*
3315 * Various schedule()-time debugging checks and statistics:
3316 */
3317static inline void schedule_debug(struct task_struct *prev)
3318{
3319	/*
3320	 * Test if we are atomic. Since do_exit() needs to call into
3321	 * schedule() atomically, we ignore that path for now.
3322	 * Otherwise, whine if we are scheduling when we should not be.
3323	 */
3324	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
3325		__schedule_bug(prev);
3326	rcu_sleep_check();
3327
3328	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3329
3330	schedstat_inc(this_rq(), sched_count);
3331}
3332
3333static void put_prev_task(struct rq *rq, struct task_struct *prev)
3334{
3335	if (prev->on_rq || rq->skip_clock_update < 0)
3336		update_rq_clock(rq);
3337	prev->sched_class->put_prev_task(rq, prev);
3338}
3339
3340/*
3341 * Pick up the highest-prio task:
3342 */
3343static inline struct task_struct *
3344pick_next_task(struct rq *rq)
3345{
3346	const struct sched_class *class;
3347	struct task_struct *p;
3348
3349	/*
3350	 * Optimization: we know that if all tasks are in
3351	 * the fair class we can call that function directly:
3352	 */
3353	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
3354		p = fair_sched_class.pick_next_task(rq);
3355		if (likely(p))
3356			return p;
3357	}
3358
3359	for_each_class(class) {
3360		p = class->pick_next_task(rq);
3361		if (p)
3362			return p;
3363	}
3364
3365	BUG(); /* the idle class will always have a runnable task */
3366}
3367
3368/*
3369 * __schedule() is the main scheduler function.
3370 */
3371static void __sched __schedule(void)
3372{
3373	struct task_struct *prev, *next;
3374	unsigned long *switch_count;
3375	struct rq *rq;
3376	int cpu;
3377
3378need_resched:
3379	preempt_disable();
3380	cpu = smp_processor_id();
3381	rq = cpu_rq(cpu);
3382	rcu_note_context_switch(cpu);
3383	prev = rq->curr;
3384
3385	schedule_debug(prev);
3386
3387	if (sched_feat(HRTICK))
3388		hrtick_clear(rq);
3389
3390	raw_spin_lock_irq(&rq->lock);
3391
3392	switch_count = &prev->nivcsw;
3393	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
3394		if (unlikely(signal_pending_state(prev->state, prev))) {
3395			prev->state = TASK_RUNNING;
3396		} else {
3397			deactivate_task(rq, prev, DEQUEUE_SLEEP);
3398			prev->on_rq = 0;
3399
3400			/*
3401			 * If a worker went to sleep, notify and ask workqueue
3402			 * whether it wants to wake up a task to maintain
3403			 * concurrency.
3404			 */
3405			if (prev->flags & PF_WQ_WORKER) {
3406				struct task_struct *to_wakeup;
3407
3408				to_wakeup = wq_worker_sleeping(prev, cpu);
3409				if (to_wakeup)
3410					try_to_wake_up_local(to_wakeup);
3411			}
3412		}
3413		switch_count = &prev->nvcsw;
3414	}
3415
3416	pre_schedule(rq, prev);
3417
3418	if (unlikely(!rq->nr_running))
3419		idle_balance(cpu, rq);
3420
3421	put_prev_task(rq, prev);
3422	next = pick_next_task(rq);
3423	clear_tsk_need_resched(prev);
3424	rq->skip_clock_update = 0;
3425
3426	if (likely(prev != next)) {
3427		rq->nr_switches++;
3428		rq->curr = next;
3429		++*switch_count;
3430
3431		context_switch(rq, prev, next); /* unlocks the rq */
3432		/*
3433		 * The context switch have flipped the stack from under us
3434		 * and restored the local variables which were saved when
3435		 * this task called schedule() in the past. prev == current
3436		 * is still correct, but it can be moved to another cpu/rq.
3437		 */
3438		cpu = smp_processor_id();
3439		rq = cpu_rq(cpu);
3440	} else
3441		raw_spin_unlock_irq(&rq->lock);
3442
3443	post_schedule(rq);
3444
3445	sched_preempt_enable_no_resched();
3446	if (need_resched())
3447		goto need_resched;
3448}
3449
3450static inline void sched_submit_work(struct task_struct *tsk)
3451{
3452	if (!tsk->state || tsk_is_pi_blocked(tsk))
3453		return;
3454	/*
3455	 * If we are going to sleep and we have plugged IO queued,
3456	 * make sure to submit it to avoid deadlocks.
3457	 */
3458	if (blk_needs_flush_plug(tsk))
3459		blk_schedule_flush_plug(tsk);
3460}
3461
3462asmlinkage void __sched schedule(void)
3463{
3464	struct task_struct *tsk = current;
3465
3466	sched_submit_work(tsk);
3467	__schedule();
3468}
3469EXPORT_SYMBOL(schedule);
3470
3471/**
3472 * schedule_preempt_disabled - called with preemption disabled
3473 *
3474 * Returns with preemption disabled. Note: preempt_count must be 1
3475 */
3476void __sched schedule_preempt_disabled(void)
3477{
3478	sched_preempt_enable_no_resched();
3479	schedule();
3480	preempt_disable();
3481}
3482
3483#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
3484
3485static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
3486{
3487	if (lock->owner != owner)
3488		return false;
3489
3490	/*
3491	 * Ensure we emit the owner->on_cpu, dereference _after_ checking
3492	 * lock->owner still matches owner, if that fails, owner might
3493	 * point to free()d memory, if it still matches, the rcu_read_lock()
3494	 * ensures the memory stays valid.
3495	 */
3496	barrier();
3497
3498	return owner->on_cpu;
3499}
3500
3501/*
3502 * Look out! "owner" is an entirely speculative pointer
3503 * access and not reliable.
3504 */
3505int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
3506{
3507	if (!sched_feat(OWNER_SPIN))
3508		return 0;
3509
3510	rcu_read_lock();
3511	while (owner_running(lock, owner)) {
3512		if (need_resched())
3513			break;
3514
3515		arch_mutex_cpu_relax();
3516	}
3517	rcu_read_unlock();
3518
3519	/*
3520	 * We break out the loop above on need_resched() and when the
3521	 * owner changed, which is a sign for heavy contention. Return
3522	 * success only when lock->owner is NULL.
3523	 */
3524	return lock->owner == NULL;
3525}
3526#endif
3527
3528#ifdef CONFIG_PREEMPT
3529/*
3530 * this is the entry point to schedule() from in-kernel preemption
3531 * off of preempt_enable. Kernel preemptions off return from interrupt
3532 * occur there and call schedule directly.
3533 */
3534asmlinkage void __sched notrace preempt_schedule(void)
3535{
3536	struct thread_info *ti = current_thread_info();
3537
3538	/*
3539	 * If there is a non-zero preempt_count or interrupts are disabled,
3540	 * we do not want to preempt the current task. Just return..
3541	 */
3542	if (likely(ti->preempt_count || irqs_disabled()))
3543		return;
3544
3545	do {
3546		add_preempt_count_notrace(PREEMPT_ACTIVE);
3547		__schedule();
3548		sub_preempt_count_notrace(PREEMPT_ACTIVE);
3549
3550		/*
3551		 * Check again in case we missed a preemption opportunity
3552		 * between schedule and now.
3553		 */
3554		barrier();
3555	} while (need_resched());
3556}
3557EXPORT_SYMBOL(preempt_schedule);
3558
3559/*
3560 * this is the entry point to schedule() from kernel preemption
3561 * off of irq context.
3562 * Note, that this is called and return with irqs disabled. This will
3563 * protect us against recursive calling from irq.
3564 */
3565asmlinkage void __sched preempt_schedule_irq(void)
3566{
3567	struct thread_info *ti = current_thread_info();
3568
3569	/* Catch callers which need to be fixed */
3570	BUG_ON(ti->preempt_count || !irqs_disabled());
3571
3572	do {
3573		add_preempt_count(PREEMPT_ACTIVE);
3574		local_irq_enable();
3575		__schedule();
3576		local_irq_disable();
3577		sub_preempt_count(PREEMPT_ACTIVE);
3578
3579		/*
3580		 * Check again in case we missed a preemption opportunity
3581		 * between schedule and now.
3582		 */
3583		barrier();
3584	} while (need_resched());
3585}
3586
3587#endif /* CONFIG_PREEMPT */
3588
3589int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
3590			  void *key)
3591{
3592	return try_to_wake_up(curr->private, mode, wake_flags);
3593}
3594EXPORT_SYMBOL(default_wake_function);
3595
3596/*
3597 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
3598 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
3599 * number) then we wake all the non-exclusive tasks and one exclusive task.
3600 *
3601 * There are circumstances in which we can try to wake a task which has already
3602 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
3603 * zero in this (rare) case, and we handle it by continuing to scan the queue.
3604 */
3605static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
3606			int nr_exclusive, int wake_flags, void *key)
3607{
3608	wait_queue_t *curr, *next;
3609
3610	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
3611		unsigned flags = curr->flags;
3612
3613		if (curr->func(curr, mode, wake_flags, key) &&
3614				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
3615			break;
3616	}
3617}
3618
3619/**
3620 * __wake_up - wake up threads blocked on a waitqueue.
3621 * @q: the waitqueue
3622 * @mode: which threads
3623 * @nr_exclusive: how many wake-one or wake-many threads to wake up
3624 * @key: is directly passed to the wakeup function
3625 *
3626 * It may be assumed that this function implies a write memory barrier before
3627 * changing the task state if and only if any tasks are woken up.
3628 */
3629void __wake_up(wait_queue_head_t *q, unsigned int mode,
3630			int nr_exclusive, void *key)
3631{
3632	unsigned long flags;
3633
3634	spin_lock_irqsave(&q->lock, flags);
3635	__wake_up_common(q, mode, nr_exclusive, 0, key);
3636	spin_unlock_irqrestore(&q->lock, flags);
3637}
3638EXPORT_SYMBOL(__wake_up);
3639
3640/*
3641 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
3642 */
3643void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
3644{
3645	__wake_up_common(q, mode, nr, 0, NULL);
3646}
3647EXPORT_SYMBOL_GPL(__wake_up_locked);
3648
3649void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
3650{
3651	__wake_up_common(q, mode, 1, 0, key);
3652}
3653EXPORT_SYMBOL_GPL(__wake_up_locked_key);
3654
3655/**
3656 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
3657 * @q: the waitqueue
3658 * @mode: which threads
3659 * @nr_exclusive: how many wake-one or wake-many threads to wake up
3660 * @key: opaque value to be passed to wakeup targets
3661 *
3662 * The sync wakeup differs that the waker knows that it will schedule
3663 * away soon, so while the target thread will be woken up, it will not
3664 * be migrated to another CPU - ie. the two threads are 'synchronized'
3665 * with each other. This can prevent needless bouncing between CPUs.
3666 *
3667 * On UP it can prevent extra preemption.
3668 *
3669 * It may be assumed that this function implies a write memory barrier before
3670 * changing the task state if and only if any tasks are woken up.
3671 */
3672void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
3673			int nr_exclusive, void *key)
3674{
3675	unsigned long flags;
3676	int wake_flags = WF_SYNC;
3677
3678	if (unlikely(!q))
3679		return;
3680
3681	if (unlikely(!nr_exclusive))
3682		wake_flags = 0;
3683
3684	spin_lock_irqsave(&q->lock, flags);
3685	__wake_up_common(q, mode, nr_exclusive, wake_flags, key);
3686	spin_unlock_irqrestore(&q->lock, flags);
3687}
3688EXPORT_SYMBOL_GPL(__wake_up_sync_key);
3689
3690/*
3691 * __wake_up_sync - see __wake_up_sync_key()
3692 */
3693void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
3694{
3695	__wake_up_sync_key(q, mode, nr_exclusive, NULL);
3696}
3697EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
3698
3699/**
3700 * complete: - signals a single thread waiting on this completion
3701 * @x:  holds the state of this particular completion
3702 *
3703 * This will wake up a single thread waiting on this completion. Threads will be
3704 * awakened in the same order in which they were queued.
3705 *
3706 * See also complete_all(), wait_for_completion() and related routines.
3707 *
3708 * It may be assumed that this function implies a write memory barrier before
3709 * changing the task state if and only if any tasks are woken up.
3710 */
3711void complete(struct completion *x)
3712{
3713	unsigned long flags;
3714
3715	spin_lock_irqsave(&x->wait.lock, flags);
3716	x->done++;
3717	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
3718	spin_unlock_irqrestore(&x->wait.lock, flags);
3719}
3720EXPORT_SYMBOL(complete);
3721
3722/**
3723 * complete_all: - signals all threads waiting on this completion
3724 * @x:  holds the state of this particular completion
3725 *
3726 * This will wake up all threads waiting on this particular completion event.
3727 *
3728 * It may be assumed that this function implies a write memory barrier before
3729 * changing the task state if and only if any tasks are woken up.
3730 */
3731void complete_all(struct completion *x)
3732{
3733	unsigned long flags;
3734
3735	spin_lock_irqsave(&x->wait.lock, flags);
3736	x->done += UINT_MAX/2;
3737	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
3738	spin_unlock_irqrestore(&x->wait.lock, flags);
3739}
3740EXPORT_SYMBOL(complete_all);
3741
3742static inline long __sched
3743do_wait_for_common(struct completion *x, long timeout, int state)
3744{
3745	if (!x->done) {
3746		DECLARE_WAITQUEUE(wait, current);
3747
3748		__add_wait_queue_tail_exclusive(&x->wait, &wait);
3749		do {
3750			if (signal_pending_state(state, current)) {
3751				timeout = -ERESTARTSYS;
3752				break;
3753			}
3754			__set_current_state(state);
3755			spin_unlock_irq(&x->wait.lock);
3756			timeout = schedule_timeout(timeout);
3757			spin_lock_irq(&x->wait.lock);
3758		} while (!x->done && timeout);
3759		__remove_wait_queue(&x->wait, &wait);
3760		if (!x->done)
3761			return timeout;
3762	}
3763	x->done--;
3764	return timeout ?: 1;
3765}
3766
3767static long __sched
3768wait_for_common(struct completion *x, long timeout, int state)
3769{
3770	might_sleep();
3771
3772	spin_lock_irq(&x->wait.lock);
3773	timeout = do_wait_for_common(x, timeout, state);
3774	spin_unlock_irq(&x->wait.lock);
3775	return timeout;
3776}
3777
3778/**
3779 * wait_for_completion: - waits for completion of a task
3780 * @x:  holds the state of this particular completion
3781 *
3782 * This waits to be signaled for completion of a specific task. It is NOT
3783 * interruptible and there is no timeout.
3784 *
3785 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
3786 * and interrupt capability. Also see complete().
3787 */
3788void __sched wait_for_completion(struct completion *x)
3789{
3790	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
3791}
3792EXPORT_SYMBOL(wait_for_completion);
3793
3794/**
3795 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
3796 * @x:  holds the state of this particular completion
3797 * @timeout:  timeout value in jiffies
3798 *
3799 * This waits for either a completion of a specific task to be signaled or for a
3800 * specified timeout to expire. The timeout is in jiffies. It is not
3801 * interruptible.
3802 *
3803 * The return value is 0 if timed out, and positive (at least 1, or number of
3804 * jiffies left till timeout) if completed.
3805 */
3806unsigned long __sched
3807wait_for_completion_timeout(struct completion *x, unsigned long timeout)
3808{
3809	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
3810}
3811EXPORT_SYMBOL(wait_for_completion_timeout);
3812
3813/**
3814 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
3815 * @x:  holds the state of this particular completion
3816 *
3817 * This waits for completion of a specific task to be signaled. It is
3818 * interruptible.
3819 *
3820 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
3821 */
3822int __sched wait_for_completion_interruptible(struct completion *x)
3823{
3824	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
3825	if (t == -ERESTARTSYS)
3826		return t;
3827	return 0;
3828}
3829EXPORT_SYMBOL(wait_for_completion_interruptible);
3830
3831/**
3832 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
3833 * @x:  holds the state of this particular completion
3834 * @timeout:  timeout value in jiffies
3835 *
3836 * This waits for either a completion of a specific task to be signaled or for a
3837 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
3838 *
3839 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
3840 * positive (at least 1, or number of jiffies left till timeout) if completed.
3841 */
3842long __sched
3843wait_for_completion_interruptible_timeout(struct completion *x,
3844					  unsigned long timeout)
3845{
3846	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
3847}
3848EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
3849
3850/**
3851 * wait_for_completion_killable: - waits for completion of a task (killable)
3852 * @x:  holds the state of this particular completion
3853 *
3854 * This waits to be signaled for completion of a specific task. It can be
3855 * interrupted by a kill signal.
3856 *
3857 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
3858 */
3859int __sched wait_for_completion_killable(struct completion *x)
3860{
3861	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
3862	if (t == -ERESTARTSYS)
3863		return t;
3864	return 0;
3865}
3866EXPORT_SYMBOL(wait_for_completion_killable);
3867
3868/**
3869 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
3870 * @x:  holds the state of this particular completion
3871 * @timeout:  timeout value in jiffies
3872 *
3873 * This waits for either a completion of a specific task to be
3874 * signaled or for a specified timeout to expire. It can be
3875 * interrupted by a kill signal. The timeout is in jiffies.
3876 *
3877 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
3878 * positive (at least 1, or number of jiffies left till timeout) if completed.
3879 */
3880long __sched
3881wait_for_completion_killable_timeout(struct completion *x,
3882				     unsigned long timeout)
3883{
3884	return wait_for_common(x, timeout, TASK_KILLABLE);
3885}
3886EXPORT_SYMBOL(wait_for_completion_killable_timeout);
3887
3888/**
3889 *	try_wait_for_completion - try to decrement a completion without blocking
3890 *	@x:	completion structure
3891 *
3892 *	Returns: 0 if a decrement cannot be done without blocking
3893 *		 1 if a decrement succeeded.
3894 *
3895 *	If a completion is being used as a counting completion,
3896 *	attempt to decrement the counter without blocking. This
3897 *	enables us to avoid waiting if the resource the completion
3898 *	is protecting is not available.
3899 */
3900bool try_wait_for_completion(struct completion *x)
3901{
3902	unsigned long flags;
3903	int ret = 1;
3904
3905	spin_lock_irqsave(&x->wait.lock, flags);
3906	if (!x->done)
3907		ret = 0;
3908	else
3909		x->done--;
3910	spin_unlock_irqrestore(&x->wait.lock, flags);
3911	return ret;
3912}
3913EXPORT_SYMBOL(try_wait_for_completion);
3914
3915/**
3916 *	completion_done - Test to see if a completion has any waiters
3917 *	@x:	completion structure
3918 *
3919 *	Returns: 0 if there are waiters (wait_for_completion() in progress)
3920 *		 1 if there are no waiters.
3921 *
3922 */
3923bool completion_done(struct completion *x)
3924{
3925	unsigned long flags;
3926	int ret = 1;
3927
3928	spin_lock_irqsave(&x->wait.lock, flags);
3929	if (!x->done)
3930		ret = 0;
3931	spin_unlock_irqrestore(&x->wait.lock, flags);
3932	return ret;
3933}
3934EXPORT_SYMBOL(completion_done);
3935
3936static long __sched
3937sleep_on_common(wait_queue_head_t *q, int state, long timeout)
3938{
3939	unsigned long flags;
3940	wait_queue_t wait;
3941
3942	init_waitqueue_entry(&wait, current);
3943
3944	__set_current_state(state);
3945
3946	spin_lock_irqsave(&q->lock, flags);
3947	__add_wait_queue(q, &wait);
3948	spin_unlock(&q->lock);
3949	timeout = schedule_timeout(timeout);
3950	spin_lock_irq(&q->lock);
3951	__remove_wait_queue(q, &wait);
3952	spin_unlock_irqrestore(&q->lock, flags);
3953
3954	return timeout;
3955}
3956
3957void __sched interruptible_sleep_on(wait_queue_head_t *q)
3958{
3959	sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
3960}
3961EXPORT_SYMBOL(interruptible_sleep_on);
3962
3963long __sched
3964interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
3965{
3966	return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
3967}
3968EXPORT_SYMBOL(interruptible_sleep_on_timeout);
3969
3970void __sched sleep_on(wait_queue_head_t *q)
3971{
3972	sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
3973}
3974EXPORT_SYMBOL(sleep_on);
3975
3976long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
3977{
3978	return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
3979}
3980EXPORT_SYMBOL(sleep_on_timeout);
3981
3982#ifdef CONFIG_RT_MUTEXES
3983
3984/*
3985 * rt_mutex_setprio - set the current priority of a task
3986 * @p: task
3987 * @prio: prio value (kernel-internal form)
3988 *
3989 * This function changes the 'effective' priority of a task. It does
3990 * not touch ->normal_prio like __setscheduler().
3991 *
3992 * Used by the rt_mutex code to implement priority inheritance logic.
3993 */
3994void rt_mutex_setprio(struct task_struct *p, int prio)
3995{
3996	int oldprio, on_rq, running;
3997	struct rq *rq;
3998	const struct sched_class *prev_class;
3999
4000	BUG_ON(prio < 0 || prio > MAX_PRIO);
4001
4002	rq = __task_rq_lock(p);
4003
4004	/*
4005	 * Idle task boosting is a nono in general. There is one
4006	 * exception, when PREEMPT_RT and NOHZ is active:
4007	 *
4008	 * The idle task calls get_next_timer_interrupt() and holds
4009	 * the timer wheel base->lock on the CPU and another CPU wants
4010	 * to access the timer (probably to cancel it). We can safely
4011	 * ignore the boosting request, as the idle CPU runs this code
4012	 * with interrupts disabled and will complete the lock
4013	 * protected section without being interrupted. So there is no
4014	 * real need to boost.
4015	 */
4016	if (unlikely(p == rq->idle)) {
4017		WARN_ON(p != rq->curr);
4018		WARN_ON(p->pi_blocked_on);
4019		goto out_unlock;
4020	}
4021
4022	trace_sched_pi_setprio(p, prio);
4023	oldprio = p->prio;
4024	prev_class = p->sched_class;
4025	on_rq = p->on_rq;
4026	running = task_current(rq, p);
4027	if (on_rq)
4028		dequeue_task(rq, p, 0);
4029	if (running)
4030		p->sched_class->put_prev_task(rq, p);
4031
4032	if (rt_prio(prio))
4033		p->sched_class = &rt_sched_class;
4034	else
4035		p->sched_class = &fair_sched_class;
4036
4037	p->prio = prio;
4038
4039	if (running)
4040		p->sched_class->set_curr_task(rq);
4041	if (on_rq)
4042		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
4043
4044	check_class_changed(rq, p, prev_class, oldprio);
4045out_unlock:
4046	__task_rq_unlock(rq);
4047}
4048#endif
4049void set_user_nice(struct task_struct *p, long nice)
4050{
4051	int old_prio, delta, on_rq;
4052	unsigned long flags;
4053	struct rq *rq;
4054
4055	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
4056		return;
4057	/*
4058	 * We have to be careful, if called from sys_setpriority(),
4059	 * the task might be in the middle of scheduling on another CPU.
4060	 */
4061	rq = task_rq_lock(p, &flags);
4062	/*
4063	 * The RT priorities are set via sched_setscheduler(), but we still
4064	 * allow the 'normal' nice value to be set - but as expected
4065	 * it wont have any effect on scheduling until the task is
4066	 * SCHED_FIFO/SCHED_RR:
4067	 */
4068	if (task_has_rt_policy(p)) {
4069		p->static_prio = NICE_TO_PRIO(nice);
4070		goto out_unlock;
4071	}
4072	on_rq = p->on_rq;
4073	if (on_rq)
4074		dequeue_task(rq, p, 0);
4075
4076	p->static_prio = NICE_TO_PRIO(nice);
4077	set_load_weight(p);
4078	old_prio = p->prio;
4079	p->prio = effective_prio(p);
4080	delta = p->prio - old_prio;
4081
4082	if (on_rq) {
4083		enqueue_task(rq, p, 0);
4084		/*
4085		 * If the task increased its priority or is running and
4086		 * lowered its priority, then reschedule its CPU:
4087		 */
4088		if (delta < 0 || (delta > 0 && task_running(rq, p)))
4089			resched_task(rq->curr);
4090	}
4091out_unlock:
4092	task_rq_unlock(rq, p, &flags);
4093}
4094EXPORT_SYMBOL(set_user_nice);
4095
4096/*
4097 * can_nice - check if a task can reduce its nice value
4098 * @p: task
4099 * @nice: nice value
4100 */
4101int can_nice(const struct task_struct *p, const int nice)
4102{
4103	/* convert nice value [19,-20] to rlimit style value [1,40] */
4104	int nice_rlim = 20 - nice;
4105
4106	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
4107		capable(CAP_SYS_NICE));
4108}
4109
4110#ifdef __ARCH_WANT_SYS_NICE
4111
4112/*
4113 * sys_nice - change the priority of the current process.
4114 * @increment: priority increment
4115 *
4116 * sys_setpriority is a more generic, but much slower function that
4117 * does similar things.
4118 */
4119SYSCALL_DEFINE1(nice, int, increment)
4120{
4121	long nice, retval;
4122
4123	/*
4124	 * Setpriority might change our priority at the same moment.
4125	 * We don't have to worry. Conceptually one call occurs first
4126	 * and we have a single winner.
4127	 */
4128	if (increment < -40)
4129		increment = -40;
4130	if (increment > 40)
4131		increment = 40;
4132
4133	nice = TASK_NICE(current) + increment;
4134	if (nice < -20)
4135		nice = -20;
4136	if (nice > 19)
4137		nice = 19;
4138
4139	if (increment < 0 && !can_nice(current, nice))
4140		return -EPERM;
4141
4142	retval = security_task_setnice(current, nice);
4143	if (retval)
4144		return retval;
4145
4146	set_user_nice(current, nice);
4147	return 0;
4148}
4149
4150#endif
4151
4152/**
4153 * task_prio - return the priority value of a given task.
4154 * @p: the task in question.
4155 *
4156 * This is the priority value as seen by users in /proc.
4157 * RT tasks are offset by -200. Normal tasks are centered
4158 * around 0, value goes from -16 to +15.
4159 */
4160int task_prio(const struct task_struct *p)
4161{
4162	return p->prio - MAX_RT_PRIO;
4163}
4164
4165/**
4166 * task_nice - return the nice value of a given task.
4167 * @p: the task in question.
4168 */
4169int task_nice(const struct task_struct *p)
4170{
4171	return TASK_NICE(p);
4172}
4173EXPORT_SYMBOL(task_nice);
4174
4175/**
4176 * idle_cpu - is a given cpu idle currently?
4177 * @cpu: the processor in question.
4178 */
4179int idle_cpu(int cpu)
4180{
4181	struct rq *rq = cpu_rq(cpu);
4182
4183	if (rq->curr != rq->idle)
4184		return 0;
4185
4186	if (rq->nr_running)
4187		return 0;
4188
4189#ifdef CONFIG_SMP
4190	if (!llist_empty(&rq->wake_list))
4191		return 0;
4192#endif
4193
4194	return 1;
4195}
4196
4197/**
4198 * idle_task - return the idle task for a given cpu.
4199 * @cpu: the processor in question.
4200 */
4201struct task_struct *idle_task(int cpu)
4202{
4203	return cpu_rq(cpu)->idle;
4204}
4205
4206/**
4207 * find_process_by_pid - find a process with a matching PID value.
4208 * @pid: the pid in question.
4209 */
4210static struct task_struct *find_process_by_pid(pid_t pid)
4211{
4212	return pid ? find_task_by_vpid(pid) : current;
4213}
4214
4215/* Actually do priority change: must hold rq lock. */
4216static void
4217__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
4218{
4219	p->policy = policy;
4220	p->rt_priority = prio;
4221	p->normal_prio = normal_prio(p);
4222	/* we are holding p->pi_lock already */
4223	p->prio = rt_mutex_getprio(p);
4224	if (rt_prio(p->prio))
4225		p->sched_class = &rt_sched_class;
4226	else
4227		p->sched_class = &fair_sched_class;
4228	set_load_weight(p);
4229}
4230
4231/*
4232 * check the target process has a UID that matches the current process's
4233 */
4234static bool check_same_owner(struct task_struct *p)
4235{
4236	const struct cred *cred = current_cred(), *pcred;
4237	bool match;
4238
4239	rcu_read_lock();
4240	pcred = __task_cred(p);
4241	match = (uid_eq(cred->euid, pcred->euid) ||
4242		 uid_eq(cred->euid, pcred->uid));
4243	rcu_read_unlock();
4244	return match;
4245}
4246
4247static int __sched_setscheduler(struct task_struct *p, int policy,
4248				const struct sched_param *param, bool user)
4249{
4250	int retval, oldprio, oldpolicy = -1, on_rq, running;
4251	unsigned long flags;
4252	const struct sched_class *prev_class;
4253	struct rq *rq;
4254	int reset_on_fork;
4255
4256	/* may grab non-irq protected spin_locks */
4257	BUG_ON(in_interrupt());
4258recheck:
4259	/* double check policy once rq lock held */
4260	if (policy < 0) {
4261		reset_on_fork = p->sched_reset_on_fork;
4262		policy = oldpolicy = p->policy;
4263	} else {
4264		reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
4265		policy &= ~SCHED_RESET_ON_FORK;
4266
4267		if (policy != SCHED_FIFO && policy != SCHED_RR &&
4268				policy != SCHED_NORMAL && policy != SCHED_BATCH &&
4269				policy != SCHED_IDLE)
4270			return -EINVAL;
4271	}
4272
4273	/*
4274	 * Valid priorities for SCHED_FIFO and SCHED_RR are
4275	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4276	 * SCHED_BATCH and SCHED_IDLE is 0.
4277	 */
4278	if (param->sched_priority < 0 ||
4279	    (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
4280	    (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
4281		return -EINVAL;
4282	if (rt_policy(policy) != (param->sched_priority != 0))
4283		return -EINVAL;
4284
4285	/*
4286	 * Allow unprivileged RT tasks to decrease priority:
4287	 */
4288	if (user && !capable(CAP_SYS_NICE)) {
4289		if (rt_policy(policy)) {
4290			unsigned long rlim_rtprio =
4291					task_rlimit(p, RLIMIT_RTPRIO);
4292
4293			/* can't set/change the rt policy */
4294			if (policy != p->policy && !rlim_rtprio)
4295				return -EPERM;
4296
4297			/* can't increase priority */
4298			if (param->sched_priority > p->rt_priority &&
4299			    param->sched_priority > rlim_rtprio)
4300				return -EPERM;
4301		}
4302
4303		/*
4304		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
4305		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
4306		 */
4307		if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
4308			if (!can_nice(p, TASK_NICE(p)))
4309				return -EPERM;
4310		}
4311
4312		/* can't change other user's priorities */
4313		if (!check_same_owner(p))
4314			return -EPERM;
4315
4316		/* Normal users shall not reset the sched_reset_on_fork flag */
4317		if (p->sched_reset_on_fork && !reset_on_fork)
4318			return -EPERM;
4319	}
4320
4321	if (user) {
4322		retval = security_task_setscheduler(p);
4323		if (retval)
4324			return retval;
4325	}
4326
4327	/*
4328	 * make sure no PI-waiters arrive (or leave) while we are
4329	 * changing the priority of the task:
4330	 *
4331	 * To be able to change p->policy safely, the appropriate
4332	 * runqueue lock must be held.
4333	 */
4334	rq = task_rq_lock(p, &flags);
4335
4336	/*
4337	 * Changing the policy of the stop threads its a very bad idea
4338	 */
4339	if (p == rq->stop) {
4340		task_rq_unlock(rq, p, &flags);
4341		return -EINVAL;
4342	}
4343
4344	/*
4345	 * If not changing anything there's no need to proceed further:
4346	 */
4347	if (unlikely(policy == p->policy && (!rt_policy(policy) ||
4348			param->sched_priority == p->rt_priority))) {
4349		task_rq_unlock(rq, p, &flags);
4350		return 0;
4351	}
4352
4353#ifdef CONFIG_RT_GROUP_SCHED
4354	if (user) {
4355		/*
4356		 * Do not allow realtime tasks into groups that have no runtime
4357		 * assigned.
4358		 */
4359		if (rt_bandwidth_enabled() && rt_policy(policy) &&
4360				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
4361				!task_group_is_autogroup(task_group(p))) {
4362			task_rq_unlock(rq, p, &flags);
4363			return -EPERM;
4364		}
4365	}
4366#endif
4367
4368	/* recheck policy now with rq lock held */
4369	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4370		policy = oldpolicy = -1;
4371		task_rq_unlock(rq, p, &flags);
4372		goto recheck;
4373	}
4374	on_rq = p->on_rq;
4375	running = task_current(rq, p);
4376	if (on_rq)
4377		dequeue_task(rq, p, 0);
4378	if (running)
4379		p->sched_class->put_prev_task(rq, p);
4380
4381	p->sched_reset_on_fork = reset_on_fork;
4382
4383	oldprio = p->prio;
4384	prev_class = p->sched_class;
4385	__setscheduler(rq, p, policy, param->sched_priority);
4386
4387	if (running)
4388		p->sched_class->set_curr_task(rq);
4389	if (on_rq)
4390		enqueue_task(rq, p, 0);
4391
4392	check_class_changed(rq, p, prev_class, oldprio);
4393	task_rq_unlock(rq, p, &flags);
4394
4395	rt_mutex_adjust_pi(p);
4396
4397	return 0;
4398}
4399
4400/**
4401 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4402 * @p: the task in question.
4403 * @policy: new policy.
4404 * @param: structure containing the new RT priority.
4405 *
4406 * NOTE that the task may be already dead.
4407 */
4408int sched_setscheduler(struct task_struct *p, int policy,
4409		       const struct sched_param *param)
4410{
4411	return __sched_setscheduler(p, policy, param, true);
4412}
4413EXPORT_SYMBOL_GPL(sched_setscheduler);
4414
4415/**
4416 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4417 * @p: the task in question.
4418 * @policy: new policy.
4419 * @param: structure containing the new RT priority.
4420 *
4421 * Just like sched_setscheduler, only don't bother checking if the
4422 * current context has permission.  For example, this is needed in
4423 * stop_machine(): we create temporary high priority worker threads,
4424 * but our caller might not have that capability.
4425 */
4426int sched_setscheduler_nocheck(struct task_struct *p, int policy,
4427			       const struct sched_param *param)
4428{
4429	return __sched_setscheduler(p, policy, param, false);
4430}
4431
4432static int
4433do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4434{
4435	struct sched_param lparam;
4436	struct task_struct *p;
4437	int retval;
4438
4439	if (!param || pid < 0)
4440		return -EINVAL;
4441	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4442		return -EFAULT;
4443
4444	rcu_read_lock();
4445	retval = -ESRCH;
4446	p = find_process_by_pid(pid);
4447	if (p != NULL)
4448		retval = sched_setscheduler(p, policy, &lparam);
4449	rcu_read_unlock();
4450
4451	return retval;
4452}
4453
4454/**
4455 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4456 * @pid: the pid in question.
4457 * @policy: new policy.
4458 * @param: structure containing the new RT priority.
4459 */
4460SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4461		struct sched_param __user *, param)
4462{
4463	/* negative values for policy are not valid */
4464	if (policy < 0)
4465		return -EINVAL;
4466
4467	return do_sched_setscheduler(pid, policy, param);
4468}
4469
4470/**
4471 * sys_sched_setparam - set/change the RT priority of a thread
4472 * @pid: the pid in question.
4473 * @param: structure containing the new RT priority.
4474 */
4475SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
4476{
4477	return do_sched_setscheduler(pid, -1, param);
4478}
4479
4480/**
4481 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4482 * @pid: the pid in question.
4483 */
4484SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
4485{
4486	struct task_struct *p;
4487	int retval;
4488
4489	if (pid < 0)
4490		return -EINVAL;
4491
4492	retval = -ESRCH;
4493	rcu_read_lock();
4494	p = find_process_by_pid(pid);
4495	if (p) {
4496		retval = security_task_getscheduler(p);
4497		if (!retval)
4498			retval = p->policy
4499				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
4500	}
4501	rcu_read_unlock();
4502	return retval;
4503}
4504
4505/**
4506 * sys_sched_getparam - get the RT priority of a thread
4507 * @pid: the pid in question.
4508 * @param: structure containing the RT priority.
4509 */
4510SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
4511{
4512	struct sched_param lp;
4513	struct task_struct *p;
4514	int retval;
4515
4516	if (!param || pid < 0)
4517		return -EINVAL;
4518
4519	rcu_read_lock();
4520	p = find_process_by_pid(pid);
4521	retval = -ESRCH;
4522	if (!p)
4523		goto out_unlock;
4524
4525	retval = security_task_getscheduler(p);
4526	if (retval)
4527		goto out_unlock;
4528
4529	lp.sched_priority = p->rt_priority;
4530	rcu_read_unlock();
4531
4532	/*
4533	 * This one might sleep, we cannot do it with a spinlock held ...
4534	 */
4535	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4536
4537	return retval;
4538
4539out_unlock:
4540	rcu_read_unlock();
4541	return retval;
4542}
4543
4544long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
4545{
4546	cpumask_var_t cpus_allowed, new_mask;
4547	struct task_struct *p;
4548	int retval;
4549
4550	get_online_cpus();
4551	rcu_read_lock();
4552
4553	p = find_process_by_pid(pid);
4554	if (!p) {
4555		rcu_read_unlock();
4556		put_online_cpus();
4557		return -ESRCH;
4558	}
4559
4560	/* Prevent p going away */
4561	get_task_struct(p);
4562	rcu_read_unlock();
4563
4564	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4565		retval = -ENOMEM;
4566		goto out_put_task;
4567	}
4568	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4569		retval = -ENOMEM;
4570		goto out_free_cpus_allowed;
4571	}
4572	retval = -EPERM;
4573	if (!check_same_owner(p) && !ns_capable(task_user_ns(p), CAP_SYS_NICE))
4574		goto out_unlock;
4575
4576	retval = security_task_setscheduler(p);
4577	if (retval)
4578		goto out_unlock;
4579
4580	cpuset_cpus_allowed(p, cpus_allowed);
4581	cpumask_and(new_mask, in_mask, cpus_allowed);
4582again:
4583	retval = set_cpus_allowed_ptr(p, new_mask);
4584
4585	if (!retval) {
4586		cpuset_cpus_allowed(p, cpus_allowed);
4587		if (!cpumask_subset(new_mask, cpus_allowed)) {
4588			/*
4589			 * We must have raced with a concurrent cpuset
4590			 * update. Just reset the cpus_allowed to the
4591			 * cpuset's cpus_allowed
4592			 */
4593			cpumask_copy(new_mask, cpus_allowed);
4594			goto again;
4595		}
4596	}
4597out_unlock:
4598	free_cpumask_var(new_mask);
4599out_free_cpus_allowed:
4600	free_cpumask_var(cpus_allowed);
4601out_put_task:
4602	put_task_struct(p);
4603	put_online_cpus();
4604	return retval;
4605}
4606
4607static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
4608			     struct cpumask *new_mask)
4609{
4610	if (len < cpumask_size())
4611		cpumask_clear(new_mask);
4612	else if (len > cpumask_size())
4613		len = cpumask_size();
4614
4615	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4616}
4617
4618/**
4619 * sys_sched_setaffinity - set the cpu affinity of a process
4620 * @pid: pid of the process
4621 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4622 * @user_mask_ptr: user-space pointer to the new cpu mask
4623 */
4624SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4625		unsigned long __user *, user_mask_ptr)
4626{
4627	cpumask_var_t new_mask;
4628	int retval;
4629
4630	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4631		return -ENOMEM;
4632
4633	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4634	if (retval == 0)
4635		retval = sched_setaffinity(pid, new_mask);
4636	free_cpumask_var(new_mask);
4637	return retval;
4638}
4639
4640long sched_getaffinity(pid_t pid, struct cpumask *mask)
4641{
4642	struct task_struct *p;
4643	unsigned long flags;
4644	int retval;
4645
4646	get_online_cpus();
4647	rcu_read_lock();
4648
4649	retval = -ESRCH;
4650	p = find_process_by_pid(pid);
4651	if (!p)
4652		goto out_unlock;
4653
4654	retval = security_task_getscheduler(p);
4655	if (retval)
4656		goto out_unlock;
4657
4658	raw_spin_lock_irqsave(&p->pi_lock, flags);
4659	cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
4660	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4661
4662out_unlock:
4663	rcu_read_unlock();
4664	put_online_cpus();
4665
4666	return retval;
4667}
4668
4669/**
4670 * sys_sched_getaffinity - get the cpu affinity of a process
4671 * @pid: pid of the process
4672 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4673 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4674 */
4675SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4676		unsigned long __user *, user_mask_ptr)
4677{
4678	int ret;
4679	cpumask_var_t mask;
4680
4681	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4682		return -EINVAL;
4683	if (len & (sizeof(unsigned long)-1))
4684		return -EINVAL;
4685
4686	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4687		return -ENOMEM;
4688
4689	ret = sched_getaffinity(pid, mask);
4690	if (ret == 0) {
4691		size_t retlen = min_t(size_t, len, cpumask_size());
4692
4693		if (copy_to_user(user_mask_ptr, mask, retlen))
4694			ret = -EFAULT;
4695		else
4696			ret = retlen;
4697	}
4698	free_cpumask_var(mask);
4699
4700	return ret;
4701}
4702
4703/**
4704 * sys_sched_yield - yield the current processor to other threads.
4705 *
4706 * This function yields the current CPU to other tasks. If there are no
4707 * other threads running on this CPU then this function will return.
4708 */
4709SYSCALL_DEFINE0(sched_yield)
4710{
4711	struct rq *rq = this_rq_lock();
4712
4713	schedstat_inc(rq, yld_count);
4714	current->sched_class->yield_task(rq);
4715
4716	/*
4717	 * Since we are going to call schedule() anyway, there's
4718	 * no need to preempt or enable interrupts:
4719	 */
4720	__release(rq->lock);
4721	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
4722	do_raw_spin_unlock(&rq->lock);
4723	sched_preempt_enable_no_resched();
4724
4725	schedule();
4726
4727	return 0;
4728}
4729
4730static inline int should_resched(void)
4731{
4732	return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
4733}
4734
4735static void __cond_resched(void)
4736{
4737	add_preempt_count(PREEMPT_ACTIVE);
4738	__schedule();
4739	sub_preempt_count(PREEMPT_ACTIVE);
4740}
4741
4742int __sched _cond_resched(void)
4743{
4744	if (should_resched()) {
4745		__cond_resched();
4746		return 1;
4747	}
4748	return 0;
4749}
4750EXPORT_SYMBOL(_cond_resched);
4751
4752/*
4753 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
4754 * call schedule, and on return reacquire the lock.
4755 *
4756 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
4757 * operations here to prevent schedule() from being called twice (once via
4758 * spin_unlock(), once by hand).
4759 */
4760int __cond_resched_lock(spinlock_t *lock)
4761{
4762	int resched = should_resched();
4763	int ret = 0;
4764
4765	lockdep_assert_held(lock);
4766
4767	if (spin_needbreak(lock) || resched) {
4768		spin_unlock(lock);
4769		if (resched)
4770			__cond_resched();
4771		else
4772			cpu_relax();
4773		ret = 1;
4774		spin_lock(lock);
4775	}
4776	return ret;
4777}
4778EXPORT_SYMBOL(__cond_resched_lock);
4779
4780int __sched __cond_resched_softirq(void)
4781{
4782	BUG_ON(!in_softirq());
4783
4784	if (should_resched()) {
4785		local_bh_enable();
4786		__cond_resched();
4787		local_bh_disable();
4788		return 1;
4789	}
4790	return 0;
4791}
4792EXPORT_SYMBOL(__cond_resched_softirq);
4793
4794/**
4795 * yield - yield the current processor to other threads.
4796 *
4797 * Do not ever use this function, there's a 99% chance you're doing it wrong.
4798 *
4799 * The scheduler is at all times free to pick the calling task as the most
4800 * eligible task to run, if removing the yield() call from your code breaks
4801 * it, its already broken.
4802 *
4803 * Typical broken usage is:
4804 *
4805 * while (!event)
4806 * 	yield();
4807 *
4808 * where one assumes that yield() will let 'the other' process run that will
4809 * make event true. If the current task is a SCHED_FIFO task that will never
4810 * happen. Never use yield() as a progress guarantee!!
4811 *
4812 * If you want to use yield() to wait for something, use wait_event().
4813 * If you want to use yield() to be 'nice' for others, use cond_resched().
4814 * If you still want to use yield(), do not!
4815 */
4816void __sched yield(void)
4817{
4818	set_current_state(TASK_RUNNING);
4819	sys_sched_yield();
4820}
4821EXPORT_SYMBOL(yield);
4822
4823/**
4824 * yield_to - yield the current processor to another thread in
4825 * your thread group, or accelerate that thread toward the
4826 * processor it's on.
4827 * @p: target task
4828 * @preempt: whether task preemption is allowed or not
4829 *
4830 * It's the caller's job to ensure that the target task struct
4831 * can't go away on us before we can do any checks.
4832 *
4833 * Returns true if we indeed boosted the target task.
4834 */
4835bool __sched yield_to(struct task_struct *p, bool preempt)
4836{
4837	struct task_struct *curr = current;
4838	struct rq *rq, *p_rq;
4839	unsigned long flags;
4840	bool yielded = 0;
4841
4842	local_irq_save(flags);
4843	rq = this_rq();
4844
4845again:
4846	p_rq = task_rq(p);
4847	double_rq_lock(rq, p_rq);
4848	while (task_rq(p) != p_rq) {
4849		double_rq_unlock(rq, p_rq);
4850		goto again;
4851	}
4852
4853	if (!curr->sched_class->yield_to_task)
4854		goto out;
4855
4856	if (curr->sched_class != p->sched_class)
4857		goto out;
4858
4859	if (task_running(p_rq, p) || p->state)
4860		goto out;
4861
4862	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
4863	if (yielded) {
4864		schedstat_inc(rq, yld_count);
4865		/*
4866		 * Make p's CPU reschedule; pick_next_entity takes care of
4867		 * fairness.
4868		 */
4869		if (preempt && rq != p_rq)
4870			resched_task(p_rq->curr);
4871	} else {
4872		/*
4873		 * We might have set it in task_yield_fair(), but are
4874		 * not going to schedule(), so don't want to skip
4875		 * the next update.
4876		 */
4877		rq->skip_clock_update = 0;
4878	}
4879
4880out:
4881	double_rq_unlock(rq, p_rq);
4882	local_irq_restore(flags);
4883
4884	if (yielded)
4885		schedule();
4886
4887	return yielded;
4888}
4889EXPORT_SYMBOL_GPL(yield_to);
4890
4891/*
4892 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
4893 * that process accounting knows that this is a task in IO wait state.
4894 */
4895void __sched io_schedule(void)
4896{
4897	struct rq *rq = raw_rq();
4898
4899	delayacct_blkio_start();
4900	atomic_inc(&rq->nr_iowait);
4901	blk_flush_plug(current);
4902	current->in_iowait = 1;
4903	schedule();
4904	current->in_iowait = 0;
4905	atomic_dec(&rq->nr_iowait);
4906	delayacct_blkio_end();
4907}
4908EXPORT_SYMBOL(io_schedule);
4909
4910long __sched io_schedule_timeout(long timeout)
4911{
4912	struct rq *rq = raw_rq();
4913	long ret;
4914
4915	delayacct_blkio_start();
4916	atomic_inc(&rq->nr_iowait);
4917	blk_flush_plug(current);
4918	current->in_iowait = 1;
4919	ret = schedule_timeout(timeout);
4920	current->in_iowait = 0;
4921	atomic_dec(&rq->nr_iowait);
4922	delayacct_blkio_end();
4923	return ret;
4924}
4925
4926/**
4927 * sys_sched_get_priority_max - return maximum RT priority.
4928 * @policy: scheduling class.
4929 *
4930 * this syscall returns the maximum rt_priority that can be used
4931 * by a given scheduling class.
4932 */
4933SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
4934{
4935	int ret = -EINVAL;
4936
4937	switch (policy) {
4938	case SCHED_FIFO:
4939	case SCHED_RR:
4940		ret = MAX_USER_RT_PRIO-1;
4941		break;
4942	case SCHED_NORMAL:
4943	case SCHED_BATCH:
4944	case SCHED_IDLE:
4945		ret = 0;
4946		break;
4947	}
4948	return ret;
4949}
4950
4951/**
4952 * sys_sched_get_priority_min - return minimum RT priority.
4953 * @policy: scheduling class.
4954 *
4955 * this syscall returns the minimum rt_priority that can be used
4956 * by a given scheduling class.
4957 */
4958SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
4959{
4960	int ret = -EINVAL;
4961
4962	switch (policy) {
4963	case SCHED_FIFO:
4964	case SCHED_RR:
4965		ret = 1;
4966		break;
4967	case SCHED_NORMAL:
4968	case SCHED_BATCH:
4969	case SCHED_IDLE:
4970		ret = 0;
4971	}
4972	return ret;
4973}
4974
4975/**
4976 * sys_sched_rr_get_interval - return the default timeslice of a process.
4977 * @pid: pid of the process.
4978 * @interval: userspace pointer to the timeslice value.
4979 *
4980 * this syscall writes the default timeslice value of a given process
4981 * into the user-space timespec buffer. A value of '0' means infinity.
4982 */
4983SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
4984		struct timespec __user *, interval)
4985{
4986	struct task_struct *p;
4987	unsigned int time_slice;
4988	unsigned long flags;
4989	struct rq *rq;
4990	int retval;
4991	struct timespec t;
4992
4993	if (pid < 0)
4994		return -EINVAL;
4995
4996	retval = -ESRCH;
4997	rcu_read_lock();
4998	p = find_process_by_pid(pid);
4999	if (!p)
5000		goto out_unlock;
5001
5002	retval = security_task_getscheduler(p);
5003	if (retval)
5004		goto out_unlock;
5005
5006	rq = task_rq_lock(p, &flags);
5007	time_slice = p->sched_class->get_rr_interval(rq, p);
5008	task_rq_unlock(rq, p, &flags);
5009
5010	rcu_read_unlock();
5011	jiffies_to_timespec(time_slice, &t);
5012	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
5013	return retval;
5014
5015out_unlock:
5016	rcu_read_unlock();
5017	return retval;
5018}
5019
5020static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
5021
5022void sched_show_task(struct task_struct *p)
5023{
5024	unsigned long free = 0;
5025	unsigned state;
5026
5027	state = p->state ? __ffs(p->state) + 1 : 0;
5028	printk(KERN_INFO "%-15.15s %c", p->comm,
5029		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
5030#if BITS_PER_LONG == 32
5031	if (state == TASK_RUNNING)
5032		printk(KERN_CONT " running  ");
5033	else
5034		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
5035#else
5036	if (state == TASK_RUNNING)
5037		printk(KERN_CONT "  running task    ");
5038	else
5039		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
5040#endif
5041#ifdef CONFIG_DEBUG_STACK_USAGE
5042	free = stack_not_used(p);
5043#endif
5044	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
5045		task_pid_nr(p), task_pid_nr(rcu_dereference(p->real_parent)),
5046		(unsigned long)task_thread_info(p)->flags);
5047
5048	show_stack(p, NULL);
5049}
5050
5051void show_state_filter(unsigned long state_filter)
5052{
5053	struct task_struct *g, *p;
5054
5055#if BITS_PER_LONG == 32
5056	printk(KERN_INFO
5057		"  task                PC stack   pid father\n");
5058#else
5059	printk(KERN_INFO
5060		"  task                        PC stack   pid father\n");
5061#endif
5062	rcu_read_lock();
5063	do_each_thread(g, p) {
5064		/*
5065		 * reset the NMI-timeout, listing all files on a slow
5066		 * console might take a lot of time:
5067		 */
5068		touch_nmi_watchdog();
5069		if (!state_filter || (p->state & state_filter))
5070			sched_show_task(p);
5071	} while_each_thread(g, p);
5072
5073	touch_all_softlockup_watchdogs();
5074
5075#ifdef CONFIG_SCHED_DEBUG
5076	sysrq_sched_debug_show();
5077#endif
5078	rcu_read_unlock();
5079	/*
5080	 * Only show locks if all tasks are dumped:
5081	 */
5082	if (!state_filter)
5083		debug_show_all_locks();
5084}
5085
5086void __cpuinit init_idle_bootup_task(struct task_struct *idle)
5087{
5088	idle->sched_class = &idle_sched_class;
5089}
5090
5091/**
5092 * init_idle - set up an idle thread for a given CPU
5093 * @idle: task in question
5094 * @cpu: cpu the idle task belongs to
5095 *
5096 * NOTE: this function does not set the idle thread's NEED_RESCHED
5097 * flag, to make booting more robust.
5098 */
5099void __cpuinit init_idle(struct task_struct *idle, int cpu)
5100{
5101	struct rq *rq = cpu_rq(cpu);
5102	unsigned long flags;
5103
5104	raw_spin_lock_irqsave(&rq->lock, flags);
5105
5106	__sched_fork(idle);
5107	idle->state = TASK_RUNNING;
5108	idle->se.exec_start = sched_clock();
5109
5110	do_set_cpus_allowed(idle, cpumask_of(cpu));
5111	/*
5112	 * We're having a chicken and egg problem, even though we are
5113	 * holding rq->lock, the cpu isn't yet set to this cpu so the
5114	 * lockdep check in task_group() will fail.
5115	 *
5116	 * Similar case to sched_fork(). / Alternatively we could
5117	 * use task_rq_lock() here and obtain the other rq->lock.
5118	 *
5119	 * Silence PROVE_RCU
5120	 */
5121	rcu_read_lock();
5122	__set_task_cpu(idle, cpu);
5123	rcu_read_unlock();
5124
5125	rq->curr = rq->idle = idle;
5126#if defined(CONFIG_SMP)
5127	idle->on_cpu = 1;
5128#endif
5129	raw_spin_unlock_irqrestore(&rq->lock, flags);
5130
5131	/* Set the preempt count _outside_ the spinlocks! */
5132	task_thread_info(idle)->preempt_count = 0;
5133
5134	/*
5135	 * The idle tasks have their own, simple scheduling class:
5136	 */
5137	idle->sched_class = &idle_sched_class;
5138	ftrace_graph_init_idle_task(idle, cpu);
5139#if defined(CONFIG_SMP)
5140	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
5141#endif
5142}
5143
5144#ifdef CONFIG_SMP
5145void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
5146{
5147	if (p->sched_class && p->sched_class->set_cpus_allowed)
5148		p->sched_class->set_cpus_allowed(p, new_mask);
5149
5150	cpumask_copy(&p->cpus_allowed, new_mask);
5151	p->nr_cpus_allowed = cpumask_weight(new_mask);
5152}
5153
5154/*
5155 * This is how migration works:
5156 *
5157 * 1) we invoke migration_cpu_stop() on the target CPU using
5158 *    stop_one_cpu().
5159 * 2) stopper starts to run (implicitly forcing the migrated thread
5160 *    off the CPU)
5161 * 3) it checks whether the migrated task is still in the wrong runqueue.
5162 * 4) if it's in the wrong runqueue then the migration thread removes
5163 *    it and puts it into the right queue.
5164 * 5) stopper completes and stop_one_cpu() returns and the migration
5165 *    is done.
5166 */
5167
5168/*
5169 * Change a given task's CPU affinity. Migrate the thread to a
5170 * proper CPU and schedule it away if the CPU it's executing on
5171 * is removed from the allowed bitmask.
5172 *
5173 * NOTE: the caller must have a valid reference to the task, the
5174 * task must not exit() & deallocate itself prematurely. The
5175 * call is not atomic; no spinlocks may be held.
5176 */
5177int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
5178{
5179	unsigned long flags;
5180	struct rq *rq;
5181	unsigned int dest_cpu;
5182	int ret = 0;
5183
5184	rq = task_rq_lock(p, &flags);
5185
5186	if (cpumask_equal(&p->cpus_allowed, new_mask))
5187		goto out;
5188
5189	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
5190		ret = -EINVAL;
5191		goto out;
5192	}
5193
5194	if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
5195		ret = -EINVAL;
5196		goto out;
5197	}
5198
5199	do_set_cpus_allowed(p, new_mask);
5200
5201	/* Can the task run on the task's current CPU? If so, we're done */
5202	if (cpumask_test_cpu(task_cpu(p), new_mask))
5203		goto out;
5204
5205	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
5206	if (p->on_rq) {
5207		struct migration_arg arg = { p, dest_cpu };
5208		/* Need help from migration thread: drop lock and wait. */
5209		task_rq_unlock(rq, p, &flags);
5210		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
5211		tlb_migrate_finish(p->mm);
5212		return 0;
5213	}
5214out:
5215	task_rq_unlock(rq, p, &flags);
5216
5217	return ret;
5218}
5219EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
5220
5221/*
5222 * Move (not current) task off this cpu, onto dest cpu. We're doing
5223 * this because either it can't run here any more (set_cpus_allowed()
5224 * away from this CPU, or CPU going down), or because we're
5225 * attempting to rebalance this task on exec (sched_exec).
5226 *
5227 * So we race with normal scheduler movements, but that's OK, as long
5228 * as the task is no longer on this CPU.
5229 *
5230 * Returns non-zero if task was successfully migrated.
5231 */
5232static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
5233{
5234	struct rq *rq_dest, *rq_src;
5235	int ret = 0;
5236
5237	if (unlikely(!cpu_active(dest_cpu)))
5238		return ret;
5239
5240	rq_src = cpu_rq(src_cpu);
5241	rq_dest = cpu_rq(dest_cpu);
5242
5243	raw_spin_lock(&p->pi_lock);
5244	double_rq_lock(rq_src, rq_dest);
5245	/* Already moved. */
5246	if (task_cpu(p) != src_cpu)
5247		goto done;
5248	/* Affinity changed (again). */
5249	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
5250		goto fail;
5251
5252	/*
5253	 * If we're not on a rq, the next wake-up will ensure we're
5254	 * placed properly.
5255	 */
5256	if (p->on_rq) {
5257		dequeue_task(rq_src, p, 0);
5258		set_task_cpu(p, dest_cpu);
5259		enqueue_task(rq_dest, p, 0);
5260		check_preempt_curr(rq_dest, p, 0);
5261	}
5262done:
5263	ret = 1;
5264fail:
5265	double_rq_unlock(rq_src, rq_dest);
5266	raw_spin_unlock(&p->pi_lock);
5267	return ret;
5268}
5269
5270/*
5271 * migration_cpu_stop - this will be executed by a highprio stopper thread
5272 * and performs thread migration by bumping thread off CPU then
5273 * 'pushing' onto another runqueue.
5274 */
5275static int migration_cpu_stop(void *data)
5276{
5277	struct migration_arg *arg = data;
5278
5279	/*
5280	 * The original target cpu might have gone down and we might
5281	 * be on another cpu but it doesn't matter.
5282	 */
5283	local_irq_disable();
5284	__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
5285	local_irq_enable();
5286	return 0;
5287}
5288
5289#ifdef CONFIG_HOTPLUG_CPU
5290
5291/*
5292 * Ensures that the idle task is using init_mm right before its cpu goes
5293 * offline.
5294 */
5295void idle_task_exit(void)
5296{
5297	struct mm_struct *mm = current->active_mm;
5298
5299	BUG_ON(cpu_online(smp_processor_id()));
5300
5301	if (mm != &init_mm)
5302		switch_mm(mm, &init_mm, current);
5303	mmdrop(mm);
5304}
5305
5306/*
5307 * Since this CPU is going 'away' for a while, fold any nr_active delta
5308 * we might have. Assumes we're called after migrate_tasks() so that the
5309 * nr_active count is stable.
5310 *
5311 * Also see the comment "Global load-average calculations".
5312 */
5313static void calc_load_migrate(struct rq *rq)
5314{
5315	long delta = calc_load_fold_active(rq);
5316	if (delta)
5317		atomic_long_add(delta, &calc_load_tasks);
5318}
5319
5320/*
5321 * Migrate all tasks from the rq, sleeping tasks will be migrated by
5322 * try_to_wake_up()->select_task_rq().
5323 *
5324 * Called with rq->lock held even though we'er in stop_machine() and
5325 * there's no concurrency possible, we hold the required locks anyway
5326 * because of lock validation efforts.
5327 */
5328static void migrate_tasks(unsigned int dead_cpu)
5329{
5330	struct rq *rq = cpu_rq(dead_cpu);
5331	struct task_struct *next, *stop = rq->stop;
5332	int dest_cpu;
5333
5334	/*
5335	 * Fudge the rq selection such that the below task selection loop
5336	 * doesn't get stuck on the currently eligible stop task.
5337	 *
5338	 * We're currently inside stop_machine() and the rq is either stuck
5339	 * in the stop_machine_cpu_stop() loop, or we're executing this code,
5340	 * either way we should never end up calling schedule() until we're
5341	 * done here.
5342	 */
5343	rq->stop = NULL;
5344
5345	/* Ensure any throttled groups are reachable by pick_next_task */
5346	unthrottle_offline_cfs_rqs(rq);
5347
5348	for ( ; ; ) {
5349		/*
5350		 * There's this thread running, bail when that's the only
5351		 * remaining thread.
5352		 */
5353		if (rq->nr_running == 1)
5354			break;
5355
5356		next = pick_next_task(rq);
5357		BUG_ON(!next);
5358		next->sched_class->put_prev_task(rq, next);
5359
5360		/* Find suitable destination for @next, with force if needed. */
5361		dest_cpu = select_fallback_rq(dead_cpu, next);
5362		raw_spin_unlock(&rq->lock);
5363
5364		__migrate_task(next, dead_cpu, dest_cpu);
5365
5366		raw_spin_lock(&rq->lock);
5367	}
5368
5369	rq->stop = stop;
5370}
5371
5372#endif /* CONFIG_HOTPLUG_CPU */
5373
5374#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
5375
5376static struct ctl_table sd_ctl_dir[] = {
5377	{
5378		.procname	= "sched_domain",
5379		.mode		= 0555,
5380	},
5381	{}
5382};
5383
5384static struct ctl_table sd_ctl_root[] = {
5385	{
5386		.procname	= "kernel",
5387		.mode		= 0555,
5388		.child		= sd_ctl_dir,
5389	},
5390	{}
5391};
5392
5393static struct ctl_table *sd_alloc_ctl_entry(int n)
5394{
5395	struct ctl_table *entry =
5396		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
5397
5398	return entry;
5399}
5400
5401static void sd_free_ctl_entry(struct ctl_table **tablep)
5402{
5403	struct ctl_table *entry;
5404
5405	/*
5406	 * In the intermediate directories, both the child directory and
5407	 * procname are dynamically allocated and could fail but the mode
5408	 * will always be set. In the lowest directory the names are
5409	 * static strings and all have proc handlers.
5410	 */
5411	for (entry = *tablep; entry->mode; entry++) {
5412		if (entry->child)
5413			sd_free_ctl_entry(&entry->child);
5414		if (entry->proc_handler == NULL)
5415			kfree(entry->procname);
5416	}
5417
5418	kfree(*tablep);
5419	*tablep = NULL;
5420}
5421
5422static void
5423set_table_entry(struct ctl_table *entry,
5424		const char *procname, void *data, int maxlen,
5425		umode_t mode, proc_handler *proc_handler)
5426{
5427	entry->procname = procname;
5428	entry->data = data;
5429	entry->maxlen = maxlen;
5430	entry->mode = mode;
5431	entry->proc_handler = proc_handler;
5432}
5433
5434static struct ctl_table *
5435sd_alloc_ctl_domain_table(struct sched_domain *sd)
5436{
5437	struct ctl_table *table = sd_alloc_ctl_entry(13);
5438
5439	if (table == NULL)
5440		return NULL;
5441
5442	set_table_entry(&table[0], "min_interval", &sd->min_interval,
5443		sizeof(long), 0644, proc_doulongvec_minmax);
5444	set_table_entry(&table[1], "max_interval", &sd->max_interval,
5445		sizeof(long), 0644, proc_doulongvec_minmax);
5446	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
5447		sizeof(int), 0644, proc_dointvec_minmax);
5448	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
5449		sizeof(int), 0644, proc_dointvec_minmax);
5450	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
5451		sizeof(int), 0644, proc_dointvec_minmax);
5452	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
5453		sizeof(int), 0644, proc_dointvec_minmax);
5454	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
5455		sizeof(int), 0644, proc_dointvec_minmax);
5456	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
5457		sizeof(int), 0644, proc_dointvec_minmax);
5458	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
5459		sizeof(int), 0644, proc_dointvec_minmax);
5460	set_table_entry(&table[9], "cache_nice_tries",
5461		&sd->cache_nice_tries,
5462		sizeof(int), 0644, proc_dointvec_minmax);
5463	set_table_entry(&table[10], "flags", &sd->flags,
5464		sizeof(int), 0644, proc_dointvec_minmax);
5465	set_table_entry(&table[11], "name", sd->name,
5466		CORENAME_MAX_SIZE, 0444, proc_dostring);
5467	/* &table[12] is terminator */
5468
5469	return table;
5470}
5471
5472static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
5473{
5474	struct ctl_table *entry, *table;
5475	struct sched_domain *sd;
5476	int domain_num = 0, i;
5477	char buf[32];
5478
5479	for_each_domain(cpu, sd)
5480		domain_num++;
5481	entry = table = sd_alloc_ctl_entry(domain_num + 1);
5482	if (table == NULL)
5483		return NULL;
5484
5485	i = 0;
5486	for_each_domain(cpu, sd) {
5487		snprintf(buf, 32, "domain%d", i);
5488		entry->procname = kstrdup(buf, GFP_KERNEL);
5489		entry->mode = 0555;
5490		entry->child = sd_alloc_ctl_domain_table(sd);
5491		entry++;
5492		i++;
5493	}
5494	return table;
5495}
5496
5497static struct ctl_table_header *sd_sysctl_header;
5498static void register_sched_domain_sysctl(void)
5499{
5500	int i, cpu_num = num_possible_cpus();
5501	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
5502	char buf[32];
5503
5504	WARN_ON(sd_ctl_dir[0].child);
5505	sd_ctl_dir[0].child = entry;
5506
5507	if (entry == NULL)
5508		return;
5509
5510	for_each_possible_cpu(i) {
5511		snprintf(buf, 32, "cpu%d", i);
5512		entry->procname = kstrdup(buf, GFP_KERNEL);
5513		entry->mode = 0555;
5514		entry->child = sd_alloc_ctl_cpu_table(i);
5515		entry++;
5516	}
5517
5518	WARN_ON(sd_sysctl_header);
5519	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
5520}
5521
5522/* may be called multiple times per register */
5523static void unregister_sched_domain_sysctl(void)
5524{
5525	if (sd_sysctl_header)
5526		unregister_sysctl_table(sd_sysctl_header);
5527	sd_sysctl_header = NULL;
5528	if (sd_ctl_dir[0].child)
5529		sd_free_ctl_entry(&sd_ctl_dir[0].child);
5530}
5531#else
5532static void register_sched_domain_sysctl(void)
5533{
5534}
5535static void unregister_sched_domain_sysctl(void)
5536{
5537}
5538#endif
5539
5540static void set_rq_online(struct rq *rq)
5541{
5542	if (!rq->online) {
5543		const struct sched_class *class;
5544
5545		cpumask_set_cpu(rq->cpu, rq->rd->online);
5546		rq->online = 1;
5547
5548		for_each_class(class) {
5549			if (class->rq_online)
5550				class->rq_online(rq);
5551		}
5552	}
5553}
5554
5555static void set_rq_offline(struct rq *rq)
5556{
5557	if (rq->online) {
5558		const struct sched_class *class;
5559
5560		for_each_class(class) {
5561			if (class->rq_offline)
5562				class->rq_offline(rq);
5563		}
5564
5565		cpumask_clear_cpu(rq->cpu, rq->rd->online);
5566		rq->online = 0;
5567	}
5568}
5569
5570/*
5571 * migration_call - callback that gets triggered when a CPU is added.
5572 * Here we can start up the necessary migration thread for the new CPU.
5573 */
5574static int __cpuinit
5575migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5576{
5577	int cpu = (long)hcpu;
5578	unsigned long flags;
5579	struct rq *rq = cpu_rq(cpu);
5580
5581	switch (action & ~CPU_TASKS_FROZEN) {
5582
5583	case CPU_UP_PREPARE:
5584		rq->calc_load_update = calc_load_update;
5585		break;
5586
5587	case CPU_ONLINE:
5588		/* Update our root-domain */
5589		raw_spin_lock_irqsave(&rq->lock, flags);
5590		if (rq->rd) {
5591			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5592
5593			set_rq_online(rq);
5594		}
5595		raw_spin_unlock_irqrestore(&rq->lock, flags);
5596		break;
5597
5598#ifdef CONFIG_HOTPLUG_CPU
5599	case CPU_DYING:
5600		sched_ttwu_pending();
5601		/* Update our root-domain */
5602		raw_spin_lock_irqsave(&rq->lock, flags);
5603		if (rq->rd) {
5604			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5605			set_rq_offline(rq);
5606		}
5607		migrate_tasks(cpu);
5608		BUG_ON(rq->nr_running != 1); /* the migration thread */
5609		raw_spin_unlock_irqrestore(&rq->lock, flags);
5610
5611		calc_load_migrate(rq);
5612		break;
5613#endif
5614	}
5615
5616	update_max_interval();
5617
5618	return NOTIFY_OK;
5619}
5620
5621/*
5622 * Register at high priority so that task migration (migrate_all_tasks)
5623 * happens before everything else.  This has to be lower priority than
5624 * the notifier in the perf_event subsystem, though.
5625 */
5626static struct notifier_block __cpuinitdata migration_notifier = {
5627	.notifier_call = migration_call,
5628	.priority = CPU_PRI_MIGRATION,
5629};
5630
5631static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
5632				      unsigned long action, void *hcpu)
5633{
5634	switch (action & ~CPU_TASKS_FROZEN) {
5635	case CPU_STARTING:
5636	case CPU_DOWN_FAILED:
5637		set_cpu_active((long)hcpu, true);
5638		return NOTIFY_OK;
5639	default:
5640		return NOTIFY_DONE;
5641	}
5642}
5643
5644static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
5645					unsigned long action, void *hcpu)
5646{
5647	switch (action & ~CPU_TASKS_FROZEN) {
5648	case CPU_DOWN_PREPARE:
5649		set_cpu_active((long)hcpu, false);
5650		return NOTIFY_OK;
5651	default:
5652		return NOTIFY_DONE;
5653	}
5654}
5655
5656static int __init migration_init(void)
5657{
5658	void *cpu = (void *)(long)smp_processor_id();
5659	int err;
5660
5661	/* Initialize migration for the boot CPU */
5662	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5663	BUG_ON(err == NOTIFY_BAD);
5664	migration_call(&migration_notifier, CPU_ONLINE, cpu);
5665	register_cpu_notifier(&migration_notifier);
5666
5667	/* Register cpu active notifiers */
5668	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
5669	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
5670
5671	return 0;
5672}
5673early_initcall(migration_init);
5674#endif
5675
5676#ifdef CONFIG_SMP
5677
5678static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5679
5680#ifdef CONFIG_SCHED_DEBUG
5681
5682static __read_mostly int sched_debug_enabled;
5683
5684static int __init sched_debug_setup(char *str)
5685{
5686	sched_debug_enabled = 1;
5687
5688	return 0;
5689}
5690early_param("sched_debug", sched_debug_setup);
5691
5692static inline bool sched_debug(void)
5693{
5694	return sched_debug_enabled;
5695}
5696
5697static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
5698				  struct cpumask *groupmask)
5699{
5700	struct sched_group *group = sd->groups;
5701	char str[256];
5702
5703	cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
5704	cpumask_clear(groupmask);
5705
5706	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5707
5708	if (!(sd->flags & SD_LOAD_BALANCE)) {
5709		printk("does not load-balance\n");
5710		if (sd->parent)
5711			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5712					" has parent");
5713		return -1;
5714	}
5715
5716	printk(KERN_CONT "span %s level %s\n", str, sd->name);
5717
5718	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
5719		printk(KERN_ERR "ERROR: domain->span does not contain "
5720				"CPU%d\n", cpu);
5721	}
5722	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
5723		printk(KERN_ERR "ERROR: domain->groups does not contain"
5724				" CPU%d\n", cpu);
5725	}
5726
5727	printk(KERN_DEBUG "%*s groups:", level + 1, "");
5728	do {
5729		if (!group) {
5730			printk("\n");
5731			printk(KERN_ERR "ERROR: group is NULL\n");
5732			break;
5733		}
5734
5735		/*
5736		 * Even though we initialize ->power to something semi-sane,
5737		 * we leave power_orig unset. This allows us to detect if
5738		 * domain iteration is still funny without causing /0 traps.
5739		 */
5740		if (!group->sgp->power_orig) {
5741			printk(KERN_CONT "\n");
5742			printk(KERN_ERR "ERROR: domain->cpu_power not "
5743					"set\n");
5744			break;
5745		}
5746
5747		if (!cpumask_weight(sched_group_cpus(group))) {
5748			printk(KERN_CONT "\n");
5749			printk(KERN_ERR "ERROR: empty group\n");
5750			break;
5751		}
5752
5753		if (!(sd->flags & SD_OVERLAP) &&
5754		    cpumask_intersects(groupmask, sched_group_cpus(group))) {
5755			printk(KERN_CONT "\n");
5756			printk(KERN_ERR "ERROR: repeated CPUs\n");
5757			break;
5758		}
5759
5760		cpumask_or(groupmask, groupmask, sched_group_cpus(group));
5761
5762		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
5763
5764		printk(KERN_CONT " %s", str);
5765		if (group->sgp->power != SCHED_POWER_SCALE) {
5766			printk(KERN_CONT " (cpu_power = %d)",
5767				group->sgp->power);
5768		}
5769
5770		group = group->next;
5771	} while (group != sd->groups);
5772	printk(KERN_CONT "\n");
5773
5774	if (!cpumask_equal(sched_domain_span(sd), groupmask))
5775		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
5776
5777	if (sd->parent &&
5778	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
5779		printk(KERN_ERR "ERROR: parent span is not a superset "
5780			"of domain->span\n");
5781	return 0;
5782}
5783
5784static void sched_domain_debug(struct sched_domain *sd, int cpu)
5785{
5786	int level = 0;
5787
5788	if (!sched_debug_enabled)
5789		return;
5790
5791	if (!sd) {
5792		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5793		return;
5794	}
5795
5796	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5797
5798	for (;;) {
5799		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
5800			break;
5801		level++;
5802		sd = sd->parent;
5803		if (!sd)
5804			break;
5805	}
5806}
5807#else /* !CONFIG_SCHED_DEBUG */
5808# define sched_domain_debug(sd, cpu) do { } while (0)
5809static inline bool sched_debug(void)
5810{
5811	return false;
5812}
5813#endif /* CONFIG_SCHED_DEBUG */
5814
5815static int sd_degenerate(struct sched_domain *sd)
5816{
5817	if (cpumask_weight(sched_domain_span(sd)) == 1)
5818		return 1;
5819
5820	/* Following flags need at least 2 groups */
5821	if (sd->flags & (SD_LOAD_BALANCE |
5822			 SD_BALANCE_NEWIDLE |
5823			 SD_BALANCE_FORK |
5824			 SD_BALANCE_EXEC |
5825			 SD_SHARE_CPUPOWER |
5826			 SD_SHARE_PKG_RESOURCES)) {
5827		if (sd->groups != sd->groups->next)
5828			return 0;
5829	}
5830
5831	/* Following flags don't use groups */
5832	if (sd->flags & (SD_WAKE_AFFINE))
5833		return 0;
5834
5835	return 1;
5836}
5837
5838static int
5839sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
5840{
5841	unsigned long cflags = sd->flags, pflags = parent->flags;
5842
5843	if (sd_degenerate(parent))
5844		return 1;
5845
5846	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
5847		return 0;
5848
5849	/* Flags needing groups don't count if only 1 group in parent */
5850	if (parent->groups == parent->groups->next) {
5851		pflags &= ~(SD_LOAD_BALANCE |
5852				SD_BALANCE_NEWIDLE |
5853				SD_BALANCE_FORK |
5854				SD_BALANCE_EXEC |
5855				SD_SHARE_CPUPOWER |
5856				SD_SHARE_PKG_RESOURCES);
5857		if (nr_node_ids == 1)
5858			pflags &= ~SD_SERIALIZE;
5859	}
5860	if (~cflags & pflags)
5861		return 0;
5862
5863	return 1;
5864}
5865
5866static void free_rootdomain(struct rcu_head *rcu)
5867{
5868	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
5869
5870	cpupri_cleanup(&rd->cpupri);
5871	free_cpumask_var(rd->rto_mask);
5872	free_cpumask_var(rd->online);
5873	free_cpumask_var(rd->span);
5874	kfree(rd);
5875}
5876
5877static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5878{
5879	struct root_domain *old_rd = NULL;
5880	unsigned long flags;
5881
5882	raw_spin_lock_irqsave(&rq->lock, flags);
5883
5884	if (rq->rd) {
5885		old_rd = rq->rd;
5886
5887		if (cpumask_test_cpu(rq->cpu, old_rd->online))
5888			set_rq_offline(rq);
5889
5890		cpumask_clear_cpu(rq->cpu, old_rd->span);
5891
5892		/*
5893		 * If we dont want to free the old_rt yet then
5894		 * set old_rd to NULL to skip the freeing later
5895		 * in this function:
5896		 */
5897		if (!atomic_dec_and_test(&old_rd->refcount))
5898			old_rd = NULL;
5899	}
5900
5901	atomic_inc(&rd->refcount);
5902	rq->rd = rd;
5903
5904	cpumask_set_cpu(rq->cpu, rd->span);
5905	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
5906		set_rq_online(rq);
5907
5908	raw_spin_unlock_irqrestore(&rq->lock, flags);
5909
5910	if (old_rd)
5911		call_rcu_sched(&old_rd->rcu, free_rootdomain);
5912}
5913
5914static int init_rootdomain(struct root_domain *rd)
5915{
5916	memset(rd, 0, sizeof(*rd));
5917
5918	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
5919		goto out;
5920	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
5921		goto free_span;
5922	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
5923		goto free_online;
5924
5925	if (cpupri_init(&rd->cpupri) != 0)
5926		goto free_rto_mask;
5927	return 0;
5928
5929free_rto_mask:
5930	free_cpumask_var(rd->rto_mask);
5931free_online:
5932	free_cpumask_var(rd->online);
5933free_span:
5934	free_cpumask_var(rd->span);
5935out:
5936	return -ENOMEM;
5937}
5938
5939/*
5940 * By default the system creates a single root-domain with all cpus as
5941 * members (mimicking the global state we have today).
5942 */
5943struct root_domain def_root_domain;
5944
5945static void init_defrootdomain(void)
5946{
5947	init_rootdomain(&def_root_domain);
5948
5949	atomic_set(&def_root_domain.refcount, 1);
5950}
5951
5952static struct root_domain *alloc_rootdomain(void)
5953{
5954	struct root_domain *rd;
5955
5956	rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5957	if (!rd)
5958		return NULL;
5959
5960	if (init_rootdomain(rd) != 0) {
5961		kfree(rd);
5962		return NULL;
5963	}
5964
5965	return rd;
5966}
5967
5968static void free_sched_groups(struct sched_group *sg, int free_sgp)
5969{
5970	struct sched_group *tmp, *first;
5971
5972	if (!sg)
5973		return;
5974
5975	first = sg;
5976	do {
5977		tmp = sg->next;
5978
5979		if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
5980			kfree(sg->sgp);
5981
5982		kfree(sg);
5983		sg = tmp;
5984	} while (sg != first);
5985}
5986
5987static void free_sched_domain(struct rcu_head *rcu)
5988{
5989	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
5990
5991	/*
5992	 * If its an overlapping domain it has private groups, iterate and
5993	 * nuke them all.
5994	 */
5995	if (sd->flags & SD_OVERLAP) {
5996		free_sched_groups(sd->groups, 1);
5997	} else if (atomic_dec_and_test(&sd->groups->ref)) {
5998		kfree(sd->groups->sgp);
5999		kfree(sd->groups);
6000	}
6001	kfree(sd);
6002}
6003
6004static void destroy_sched_domain(struct sched_domain *sd, int cpu)
6005{
6006	call_rcu(&sd->rcu, free_sched_domain);
6007}
6008
6009static void destroy_sched_domains(struct sched_domain *sd, int cpu)
6010{
6011	for (; sd; sd = sd->parent)
6012		destroy_sched_domain(sd, cpu);
6013}
6014
6015/*
6016 * Keep a special pointer to the highest sched_domain that has
6017 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
6018 * allows us to avoid some pointer chasing select_idle_sibling().
6019 *
6020 * Iterate domains and sched_groups downward, assigning CPUs to be
6021 * select_idle_sibling() hw buddy.  Cross-wiring hw makes bouncing
6022 * due to random perturbation self canceling, ie sw buddies pull
6023 * their counterpart to their CPU's hw counterpart.
6024 *
6025 * Also keep a unique ID per domain (we use the first cpu number in
6026 * the cpumask of the domain), this allows us to quickly tell if
6027 * two cpus are in the same cache domain, see cpus_share_cache().
6028 */
6029DEFINE_PER_CPU(struct sched_domain *, sd_llc);
6030DEFINE_PER_CPU(int, sd_llc_id);
6031
6032static void update_top_cache_domain(int cpu)
6033{
6034	struct sched_domain *sd;
6035	int id = cpu;
6036
6037	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
6038	if (sd) {
6039		struct sched_domain *tmp = sd;
6040		struct sched_group *sg, *prev;
6041		bool right;
6042
6043		/*
6044		 * Traverse to first CPU in group, and count hops
6045		 * to cpu from there, switching direction on each
6046		 * hop, never ever pointing the last CPU rightward.
6047		 */
6048		do {
6049			id = cpumask_first(sched_domain_span(tmp));
6050			prev = sg = tmp->groups;
6051			right = 1;
6052
6053			while (cpumask_first(sched_group_cpus(sg)) != id)
6054				sg = sg->next;
6055
6056			while (!cpumask_test_cpu(cpu, sched_group_cpus(sg))) {
6057				prev = sg;
6058				sg = sg->next;
6059				right = !right;
6060			}
6061
6062			/* A CPU went down, never point back to domain start. */
6063			if (right && cpumask_first(sched_group_cpus(sg->next)) == id)
6064				right = false;
6065
6066			sg = right ? sg->next : prev;
6067			tmp->idle_buddy = cpumask_first(sched_group_cpus(sg));
6068		} while ((tmp = tmp->child));
6069
6070		id = cpumask_first(sched_domain_span(sd));
6071	}
6072
6073	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
6074	per_cpu(sd_llc_id, cpu) = id;
6075}
6076
6077/*
6078 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
6079 * hold the hotplug lock.
6080 */
6081static void
6082cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
6083{
6084	struct rq *rq = cpu_rq(cpu);
6085	struct sched_domain *tmp;
6086
6087	/* Remove the sched domains which do not contribute to scheduling. */
6088	for (tmp = sd; tmp; ) {
6089		struct sched_domain *parent = tmp->parent;
6090		if (!parent)
6091			break;
6092
6093		if (sd_parent_degenerate(tmp, parent)) {
6094			tmp->parent = parent->parent;
6095			if (parent->parent)
6096				parent->parent->child = tmp;
6097			destroy_sched_domain(parent, cpu);
6098		} else
6099			tmp = tmp->parent;
6100	}
6101
6102	if (sd && sd_degenerate(sd)) {
6103		tmp = sd;
6104		sd = sd->parent;
6105		destroy_sched_domain(tmp, cpu);
6106		if (sd)
6107			sd->child = NULL;
6108	}
6109
6110	sched_domain_debug(sd, cpu);
6111
6112	rq_attach_root(rq, rd);
6113	tmp = rq->sd;
6114	rcu_assign_pointer(rq->sd, sd);
6115	destroy_sched_domains(tmp, cpu);
6116
6117	update_top_cache_domain(cpu);
6118}
6119
6120/* cpus with isolated domains */
6121static cpumask_var_t cpu_isolated_map;
6122
6123/* Setup the mask of cpus configured for isolated domains */
6124static int __init isolated_cpu_setup(char *str)
6125{
6126	alloc_bootmem_cpumask_var(&cpu_isolated_map);
6127	cpulist_parse(str, cpu_isolated_map);
6128	return 1;
6129}
6130
6131__setup("isolcpus=", isolated_cpu_setup);
6132
6133static const struct cpumask *cpu_cpu_mask(int cpu)
6134{
6135	return cpumask_of_node(cpu_to_node(cpu));
6136}
6137
6138struct sd_data {
6139	struct sched_domain **__percpu sd;
6140	struct sched_group **__percpu sg;
6141	struct sched_group_power **__percpu sgp;
6142};
6143
6144struct s_data {
6145	struct sched_domain ** __percpu sd;
6146	struct root_domain	*rd;
6147};
6148
6149enum s_alloc {
6150	sa_rootdomain,
6151	sa_sd,
6152	sa_sd_storage,
6153	sa_none,
6154};
6155
6156struct sched_domain_topology_level;
6157
6158typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
6159typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
6160
6161#define SDTL_OVERLAP	0x01
6162
6163struct sched_domain_topology_level {
6164	sched_domain_init_f init;
6165	sched_domain_mask_f mask;
6166	int		    flags;
6167	int		    numa_level;
6168	struct sd_data      data;
6169};
6170
6171/*
6172 * Build an iteration mask that can exclude certain CPUs from the upwards
6173 * domain traversal.
6174 *
6175 * Asymmetric node setups can result in situations where the domain tree is of
6176 * unequal depth, make sure to skip domains that already cover the entire
6177 * range.
6178 *
6179 * In that case build_sched_domains() will have terminated the iteration early
6180 * and our sibling sd spans will be empty. Domains should always include the
6181 * cpu they're built on, so check that.
6182 *
6183 */
6184static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
6185{
6186	const struct cpumask *span = sched_domain_span(sd);
6187	struct sd_data *sdd = sd->private;
6188	struct sched_domain *sibling;
6189	int i;
6190
6191	for_each_cpu(i, span) {
6192		sibling = *per_cpu_ptr(sdd->sd, i);
6193		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
6194			continue;
6195
6196		cpumask_set_cpu(i, sched_group_mask(sg));
6197	}
6198}
6199
6200/*
6201 * Return the canonical balance cpu for this group, this is the first cpu
6202 * of this group that's also in the iteration mask.
6203 */
6204int group_balance_cpu(struct sched_group *sg)
6205{
6206	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
6207}
6208
6209static int
6210build_overlap_sched_groups(struct sched_domain *sd, int cpu)
6211{
6212	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
6213	const struct cpumask *span = sched_domain_span(sd);
6214	struct cpumask *covered = sched_domains_tmpmask;
6215	struct sd_data *sdd = sd->private;
6216	struct sched_domain *child;
6217	int i;
6218
6219	cpumask_clear(covered);
6220
6221	for_each_cpu(i, span) {
6222		struct cpumask *sg_span;
6223
6224		if (cpumask_test_cpu(i, covered))
6225			continue;
6226
6227		child = *per_cpu_ptr(sdd->sd, i);
6228
6229		/* See the comment near build_group_mask(). */
6230		if (!cpumask_test_cpu(i, sched_domain_span(child)))
6231			continue;
6232
6233		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6234				GFP_KERNEL, cpu_to_node(cpu));
6235
6236		if (!sg)
6237			goto fail;
6238
6239		sg_span = sched_group_cpus(sg);
6240		if (child->child) {
6241			child = child->child;
6242			cpumask_copy(sg_span, sched_domain_span(child));
6243		} else
6244			cpumask_set_cpu(i, sg_span);
6245
6246		cpumask_or(covered, covered, sg_span);
6247
6248		sg->sgp = *per_cpu_ptr(sdd->sgp, i);
6249		if (atomic_inc_return(&sg->sgp->ref) == 1)
6250			build_group_mask(sd, sg);
6251
6252		/*
6253		 * Initialize sgp->power such that even if we mess up the
6254		 * domains and no possible iteration will get us here, we won't
6255		 * die on a /0 trap.
6256		 */
6257		sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
6258
6259		/*
6260		 * Make sure the first group of this domain contains the
6261		 * canonical balance cpu. Otherwise the sched_domain iteration
6262		 * breaks. See update_sg_lb_stats().
6263		 */
6264		if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
6265		    group_balance_cpu(sg) == cpu)
6266			groups = sg;
6267
6268		if (!first)
6269			first = sg;
6270		if (last)
6271			last->next = sg;
6272		last = sg;
6273		last->next = first;
6274	}
6275	sd->groups = groups;
6276
6277	return 0;
6278
6279fail:
6280	free_sched_groups(first, 0);
6281
6282	return -ENOMEM;
6283}
6284
6285static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
6286{
6287	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
6288	struct sched_domain *child = sd->child;
6289
6290	if (child)
6291		cpu = cpumask_first(sched_domain_span(child));
6292
6293	if (sg) {
6294		*sg = *per_cpu_ptr(sdd->sg, cpu);
6295		(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
6296		atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
6297	}
6298
6299	return cpu;
6300}
6301
6302/*
6303 * build_sched_groups will build a circular linked list of the groups
6304 * covered by the given span, and will set each group's ->cpumask correctly,
6305 * and ->cpu_power to 0.
6306 *
6307 * Assumes the sched_domain tree is fully constructed
6308 */
6309static int
6310build_sched_groups(struct sched_domain *sd, int cpu)
6311{
6312	struct sched_group *first = NULL, *last = NULL;
6313	struct sd_data *sdd = sd->private;
6314	const struct cpumask *span = sched_domain_span(sd);
6315	struct cpumask *covered;
6316	int i;
6317
6318	get_group(cpu, sdd, &sd->groups);
6319	atomic_inc(&sd->groups->ref);
6320
6321	if (cpu != cpumask_first(sched_domain_span(sd)))
6322		return 0;
6323
6324	lockdep_assert_held(&sched_domains_mutex);
6325	covered = sched_domains_tmpmask;
6326
6327	cpumask_clear(covered);
6328
6329	for_each_cpu(i, span) {
6330		struct sched_group *sg;
6331		int group = get_group(i, sdd, &sg);
6332		int j;
6333
6334		if (cpumask_test_cpu(i, covered))
6335			continue;
6336
6337		cpumask_clear(sched_group_cpus(sg));
6338		sg->sgp->power = 0;
6339		cpumask_setall(sched_group_mask(sg));
6340
6341		for_each_cpu(j, span) {
6342			if (get_group(j, sdd, NULL) != group)
6343				continue;
6344
6345			cpumask_set_cpu(j, covered);
6346			cpumask_set_cpu(j, sched_group_cpus(sg));
6347		}
6348
6349		if (!first)
6350			first = sg;
6351		if (last)
6352			last->next = sg;
6353		last = sg;
6354	}
6355	last->next = first;
6356
6357	return 0;
6358}
6359
6360/*
6361 * Initialize sched groups cpu_power.
6362 *
6363 * cpu_power indicates the capacity of sched group, which is used while
6364 * distributing the load between different sched groups in a sched domain.
6365 * Typically cpu_power for all the groups in a sched domain will be same unless
6366 * there are asymmetries in the topology. If there are asymmetries, group
6367 * having more cpu_power will pickup more load compared to the group having
6368 * less cpu_power.
6369 */
6370static void init_sched_groups_power(int cpu, struct sched_domain *sd)
6371{
6372	struct sched_group *sg = sd->groups;
6373
6374	WARN_ON(!sd || !sg);
6375
6376	do {
6377		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
6378		sg = sg->next;
6379	} while (sg != sd->groups);
6380
6381	if (cpu != group_balance_cpu(sg))
6382		return;
6383
6384	update_group_power(sd, cpu);
6385	atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
6386}
6387
6388int __weak arch_sd_sibling_asym_packing(void)
6389{
6390       return 0*SD_ASYM_PACKING;
6391}
6392
6393/*
6394 * Initializers for schedule domains
6395 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
6396 */
6397
6398#ifdef CONFIG_SCHED_DEBUG
6399# define SD_INIT_NAME(sd, type)		sd->name = #type
6400#else
6401# define SD_INIT_NAME(sd, type)		do { } while (0)
6402#endif
6403
6404#define SD_INIT_FUNC(type)						\
6405static noinline struct sched_domain *					\
6406sd_init_##type(struct sched_domain_topology_level *tl, int cpu) 	\
6407{									\
6408	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);	\
6409	*sd = SD_##type##_INIT;						\
6410	SD_INIT_NAME(sd, type);						\
6411	sd->private = &tl->data;					\
6412	return sd;							\
6413}
6414
6415SD_INIT_FUNC(CPU)
6416#ifdef CONFIG_SCHED_SMT
6417 SD_INIT_FUNC(SIBLING)
6418#endif
6419#ifdef CONFIG_SCHED_MC
6420 SD_INIT_FUNC(MC)
6421#endif
6422#ifdef CONFIG_SCHED_BOOK
6423 SD_INIT_FUNC(BOOK)
6424#endif
6425
6426static int default_relax_domain_level = -1;
6427int sched_domain_level_max;
6428
6429static int __init setup_relax_domain_level(char *str)
6430{
6431	if (kstrtoint(str, 0, &default_relax_domain_level))
6432		pr_warn("Unable to set relax_domain_level\n");
6433
6434	return 1;
6435}
6436__setup("relax_domain_level=", setup_relax_domain_level);
6437
6438static void set_domain_attribute(struct sched_domain *sd,
6439				 struct sched_domain_attr *attr)
6440{
6441	int request;
6442
6443	if (!attr || attr->relax_domain_level < 0) {
6444		if (default_relax_domain_level < 0)
6445			return;
6446		else
6447			request = default_relax_domain_level;
6448	} else
6449		request = attr->relax_domain_level;
6450	if (request < sd->level) {
6451		/* turn off idle balance on this domain */
6452		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
6453	} else {
6454		/* turn on idle balance on this domain */
6455		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
6456	}
6457}
6458
6459static void __sdt_free(const struct cpumask *cpu_map);
6460static int __sdt_alloc(const struct cpumask *cpu_map);
6461
6462static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
6463				 const struct cpumask *cpu_map)
6464{
6465	switch (what) {
6466	case sa_rootdomain:
6467		if (!atomic_read(&d->rd->refcount))
6468			free_rootdomain(&d->rd->rcu); /* fall through */
6469	case sa_sd:
6470		free_percpu(d->sd); /* fall through */
6471	case sa_sd_storage:
6472		__sdt_free(cpu_map); /* fall through */
6473	case sa_none:
6474		break;
6475	}
6476}
6477
6478static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
6479						   const struct cpumask *cpu_map)
6480{
6481	memset(d, 0, sizeof(*d));
6482
6483	if (__sdt_alloc(cpu_map))
6484		return sa_sd_storage;
6485	d->sd = alloc_percpu(struct sched_domain *);
6486	if (!d->sd)
6487		return sa_sd_storage;
6488	d->rd = alloc_rootdomain();
6489	if (!d->rd)
6490		return sa_sd;
6491	return sa_rootdomain;
6492}
6493
6494/*
6495 * NULL the sd_data elements we've used to build the sched_domain and
6496 * sched_group structure so that the subsequent __free_domain_allocs()
6497 * will not free the data we're using.
6498 */
6499static void claim_allocations(int cpu, struct sched_domain *sd)
6500{
6501	struct sd_data *sdd = sd->private;
6502
6503	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
6504	*per_cpu_ptr(sdd->sd, cpu) = NULL;
6505
6506	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
6507		*per_cpu_ptr(sdd->sg, cpu) = NULL;
6508
6509	if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
6510		*per_cpu_ptr(sdd->sgp, cpu) = NULL;
6511}
6512
6513#ifdef CONFIG_SCHED_SMT
6514static const struct cpumask *cpu_smt_mask(int cpu)
6515{
6516	return topology_thread_cpumask(cpu);
6517}
6518#endif
6519
6520/*
6521 * Topology list, bottom-up.
6522 */
6523static struct sched_domain_topology_level default_topology[] = {
6524#ifdef CONFIG_SCHED_SMT
6525	{ sd_init_SIBLING, cpu_smt_mask, },
6526#endif
6527#ifdef CONFIG_SCHED_MC
6528	{ sd_init_MC, cpu_coregroup_mask, },
6529#endif
6530#ifdef CONFIG_SCHED_BOOK
6531	{ sd_init_BOOK, cpu_book_mask, },
6532#endif
6533	{ sd_init_CPU, cpu_cpu_mask, },
6534	{ NULL, },
6535};
6536
6537static struct sched_domain_topology_level *sched_domain_topology = default_topology;
6538
6539#ifdef CONFIG_NUMA
6540
6541static int sched_domains_numa_levels;
6542static int *sched_domains_numa_distance;
6543static struct cpumask ***sched_domains_numa_masks;
6544static int sched_domains_curr_level;
6545
6546static inline int sd_local_flags(int level)
6547{
6548	if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE)
6549		return 0;
6550
6551	return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
6552}
6553
6554static struct sched_domain *
6555sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
6556{
6557	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
6558	int level = tl->numa_level;
6559	int sd_weight = cpumask_weight(
6560			sched_domains_numa_masks[level][cpu_to_node(cpu)]);
6561
6562	*sd = (struct sched_domain){
6563		.min_interval		= sd_weight,
6564		.max_interval		= 2*sd_weight,
6565		.busy_factor		= 32,
6566		.imbalance_pct		= 125,
6567		.cache_nice_tries	= 2,
6568		.busy_idx		= 3,
6569		.idle_idx		= 2,
6570		.newidle_idx		= 0,
6571		.wake_idx		= 0,
6572		.forkexec_idx		= 0,
6573
6574		.flags			= 1*SD_LOAD_BALANCE
6575					| 1*SD_BALANCE_NEWIDLE
6576					| 0*SD_BALANCE_EXEC
6577					| 0*SD_BALANCE_FORK
6578					| 0*SD_BALANCE_WAKE
6579					| 0*SD_WAKE_AFFINE
6580					| 0*SD_PREFER_LOCAL
6581					| 0*SD_SHARE_CPUPOWER
6582					| 0*SD_SHARE_PKG_RESOURCES
6583					| 1*SD_SERIALIZE
6584					| 0*SD_PREFER_SIBLING
6585					| sd_local_flags(level)
6586					,
6587		.last_balance		= jiffies,
6588		.balance_interval	= sd_weight,
6589	};
6590	SD_INIT_NAME(sd, NUMA);
6591	sd->private = &tl->data;
6592
6593	/*
6594	 * Ugly hack to pass state to sd_numa_mask()...
6595	 */
6596	sched_domains_curr_level = tl->numa_level;
6597
6598	return sd;
6599}
6600
6601static const struct cpumask *sd_numa_mask(int cpu)
6602{
6603	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
6604}
6605
6606static void sched_numa_warn(const char *str)
6607{
6608	static int done = false;
6609	int i,j;
6610
6611	if (done)
6612		return;
6613
6614	done = true;
6615
6616	printk(KERN_WARNING "ERROR: %s\n\n", str);
6617
6618	for (i = 0; i < nr_node_ids; i++) {
6619		printk(KERN_WARNING "  ");
6620		for (j = 0; j < nr_node_ids; j++)
6621			printk(KERN_CONT "%02d ", node_distance(i,j));
6622		printk(KERN_CONT "\n");
6623	}
6624	printk(KERN_WARNING "\n");
6625}
6626
6627static bool find_numa_distance(int distance)
6628{
6629	int i;
6630
6631	if (distance == node_distance(0, 0))
6632		return true;
6633
6634	for (i = 0; i < sched_domains_numa_levels; i++) {
6635		if (sched_domains_numa_distance[i] == distance)
6636			return true;
6637	}
6638
6639	return false;
6640}
6641
6642static void sched_init_numa(void)
6643{
6644	int next_distance, curr_distance = node_distance(0, 0);
6645	struct sched_domain_topology_level *tl;
6646	int level = 0;
6647	int i, j, k;
6648
6649	sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
6650	if (!sched_domains_numa_distance)
6651		return;
6652
6653	/*
6654	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
6655	 * unique distances in the node_distance() table.
6656	 *
6657	 * Assumes node_distance(0,j) includes all distances in
6658	 * node_distance(i,j) in order to avoid cubic time.
6659	 */
6660	next_distance = curr_distance;
6661	for (i = 0; i < nr_node_ids; i++) {
6662		for (j = 0; j < nr_node_ids; j++) {
6663			for (k = 0; k < nr_node_ids; k++) {
6664				int distance = node_distance(i, k);
6665
6666				if (distance > curr_distance &&
6667				    (distance < next_distance ||
6668				     next_distance == curr_distance))
6669					next_distance = distance;
6670
6671				/*
6672				 * While not a strong assumption it would be nice to know
6673				 * about cases where if node A is connected to B, B is not
6674				 * equally connected to A.
6675				 */
6676				if (sched_debug() && node_distance(k, i) != distance)
6677					sched_numa_warn("Node-distance not symmetric");
6678
6679				if (sched_debug() && i && !find_numa_distance(distance))
6680					sched_numa_warn("Node-0 not representative");
6681			}
6682			if (next_distance != curr_distance) {
6683				sched_domains_numa_distance[level++] = next_distance;
6684				sched_domains_numa_levels = level;
6685				curr_distance = next_distance;
6686			} else break;
6687		}
6688
6689		/*
6690		 * In case of sched_debug() we verify the above assumption.
6691		 */
6692		if (!sched_debug())
6693			break;
6694	}
6695	/*
6696	 * 'level' contains the number of unique distances, excluding the
6697	 * identity distance node_distance(i,i).
6698	 *
6699	 * The sched_domains_nume_distance[] array includes the actual distance
6700	 * numbers.
6701	 */
6702
6703	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
6704	if (!sched_domains_numa_masks)
6705		return;
6706
6707	/*
6708	 * Now for each level, construct a mask per node which contains all
6709	 * cpus of nodes that are that many hops away from us.
6710	 */
6711	for (i = 0; i < level; i++) {
6712		sched_domains_numa_masks[i] =
6713			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
6714		if (!sched_domains_numa_masks[i])
6715			return;
6716
6717		for (j = 0; j < nr_node_ids; j++) {
6718			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
6719			if (!mask)
6720				return;
6721
6722			sched_domains_numa_masks[i][j] = mask;
6723
6724			for (k = 0; k < nr_node_ids; k++) {
6725				if (node_distance(j, k) > sched_domains_numa_distance[i])
6726					continue;
6727
6728				cpumask_or(mask, mask, cpumask_of_node(k));
6729			}
6730		}
6731	}
6732
6733	tl = kzalloc((ARRAY_SIZE(default_topology) + level) *
6734			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
6735	if (!tl)
6736		return;
6737
6738	/*
6739	 * Copy the default topology bits..
6740	 */
6741	for (i = 0; default_topology[i].init; i++)
6742		tl[i] = default_topology[i];
6743
6744	/*
6745	 * .. and append 'j' levels of NUMA goodness.
6746	 */
6747	for (j = 0; j < level; i++, j++) {
6748		tl[i] = (struct sched_domain_topology_level){
6749			.init = sd_numa_init,
6750			.mask = sd_numa_mask,
6751			.flags = SDTL_OVERLAP,
6752			.numa_level = j,
6753		};
6754	}
6755
6756	sched_domain_topology = tl;
6757}
6758#else
6759static inline void sched_init_numa(void)
6760{
6761}
6762#endif /* CONFIG_NUMA */
6763
6764static int __sdt_alloc(const struct cpumask *cpu_map)
6765{
6766	struct sched_domain_topology_level *tl;
6767	int j;
6768
6769	for (tl = sched_domain_topology; tl->init; tl++) {
6770		struct sd_data *sdd = &tl->data;
6771
6772		sdd->sd = alloc_percpu(struct sched_domain *);
6773		if (!sdd->sd)
6774			return -ENOMEM;
6775
6776		sdd->sg = alloc_percpu(struct sched_group *);
6777		if (!sdd->sg)
6778			return -ENOMEM;
6779
6780		sdd->sgp = alloc_percpu(struct sched_group_power *);
6781		if (!sdd->sgp)
6782			return -ENOMEM;
6783
6784		for_each_cpu(j, cpu_map) {
6785			struct sched_domain *sd;
6786			struct sched_group *sg;
6787			struct sched_group_power *sgp;
6788
6789		       	sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
6790					GFP_KERNEL, cpu_to_node(j));
6791			if (!sd)
6792				return -ENOMEM;
6793
6794			*per_cpu_ptr(sdd->sd, j) = sd;
6795
6796			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6797					GFP_KERNEL, cpu_to_node(j));
6798			if (!sg)
6799				return -ENOMEM;
6800
6801			sg->next = sg;
6802
6803			*per_cpu_ptr(sdd->sg, j) = sg;
6804
6805			sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
6806					GFP_KERNEL, cpu_to_node(j));
6807			if (!sgp)
6808				return -ENOMEM;
6809
6810			*per_cpu_ptr(sdd->sgp, j) = sgp;
6811		}
6812	}
6813
6814	return 0;
6815}
6816
6817static void __sdt_free(const struct cpumask *cpu_map)
6818{
6819	struct sched_domain_topology_level *tl;
6820	int j;
6821
6822	for (tl = sched_domain_topology; tl->init; tl++) {
6823		struct sd_data *sdd = &tl->data;
6824
6825		for_each_cpu(j, cpu_map) {
6826			struct sched_domain *sd;
6827
6828			if (sdd->sd) {
6829				sd = *per_cpu_ptr(sdd->sd, j);
6830				if (sd && (sd->flags & SD_OVERLAP))
6831					free_sched_groups(sd->groups, 0);
6832				kfree(*per_cpu_ptr(sdd->sd, j));
6833			}
6834
6835			if (sdd->sg)
6836				kfree(*per_cpu_ptr(sdd->sg, j));
6837			if (sdd->sgp)
6838				kfree(*per_cpu_ptr(sdd->sgp, j));
6839		}
6840		free_percpu(sdd->sd);
6841		sdd->sd = NULL;
6842		free_percpu(sdd->sg);
6843		sdd->sg = NULL;
6844		free_percpu(sdd->sgp);
6845		sdd->sgp = NULL;
6846	}
6847}
6848
6849struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
6850		struct s_data *d, const struct cpumask *cpu_map,
6851		struct sched_domain_attr *attr, struct sched_domain *child,
6852		int cpu)
6853{
6854	struct sched_domain *sd = tl->init(tl, cpu);
6855	if (!sd)
6856		return child;
6857
6858	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
6859	if (child) {
6860		sd->level = child->level + 1;
6861		sched_domain_level_max = max(sched_domain_level_max, sd->level);
6862		child->parent = sd;
6863	}
6864	sd->child = child;
6865	set_domain_attribute(sd, attr);
6866
6867	return sd;
6868}
6869
6870/*
6871 * Build sched domains for a given set of cpus and attach the sched domains
6872 * to the individual cpus
6873 */
6874static int build_sched_domains(const struct cpumask *cpu_map,
6875			       struct sched_domain_attr *attr)
6876{
6877	enum s_alloc alloc_state = sa_none;
6878	struct sched_domain *sd;
6879	struct s_data d;
6880	int i, ret = -ENOMEM;
6881
6882	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
6883	if (alloc_state != sa_rootdomain)
6884		goto error;
6885
6886	/* Set up domains for cpus specified by the cpu_map. */
6887	for_each_cpu(i, cpu_map) {
6888		struct sched_domain_topology_level *tl;
6889
6890		sd = NULL;
6891		for (tl = sched_domain_topology; tl->init; tl++) {
6892			sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
6893			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
6894				sd->flags |= SD_OVERLAP;
6895			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
6896				break;
6897		}
6898
6899		while (sd->child)
6900			sd = sd->child;
6901
6902		*per_cpu_ptr(d.sd, i) = sd;
6903	}
6904
6905	/* Build the groups for the domains */
6906	for_each_cpu(i, cpu_map) {
6907		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6908			sd->span_weight = cpumask_weight(sched_domain_span(sd));
6909			if (sd->flags & SD_OVERLAP) {
6910				if (build_overlap_sched_groups(sd, i))
6911					goto error;
6912			} else {
6913				if (build_sched_groups(sd, i))
6914					goto error;
6915			}
6916		}
6917	}
6918
6919	/* Calculate CPU power for physical packages and nodes */
6920	for (i = nr_cpumask_bits-1; i >= 0; i--) {
6921		if (!cpumask_test_cpu(i, cpu_map))
6922			continue;
6923
6924		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6925			claim_allocations(i, sd);
6926			init_sched_groups_power(i, sd);
6927		}
6928	}
6929
6930	/* Attach the domains */
6931	rcu_read_lock();
6932	for_each_cpu(i, cpu_map) {
6933		sd = *per_cpu_ptr(d.sd, i);
6934		cpu_attach_domain(sd, d.rd, i);
6935	}
6936	rcu_read_unlock();
6937
6938	ret = 0;
6939error:
6940	__free_domain_allocs(&d, alloc_state, cpu_map);
6941	return ret;
6942}
6943
6944static cpumask_var_t *doms_cur;	/* current sched domains */
6945static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
6946static struct sched_domain_attr *dattr_cur;
6947				/* attribues of custom domains in 'doms_cur' */
6948
6949/*
6950 * Special case: If a kmalloc of a doms_cur partition (array of
6951 * cpumask) fails, then fallback to a single sched domain,
6952 * as determined by the single cpumask fallback_doms.
6953 */
6954static cpumask_var_t fallback_doms;
6955
6956/*
6957 * arch_update_cpu_topology lets virtualized architectures update the
6958 * cpu core maps. It is supposed to return 1 if the topology changed
6959 * or 0 if it stayed the same.
6960 */
6961int __attribute__((weak)) arch_update_cpu_topology(void)
6962{
6963	return 0;
6964}
6965
6966cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
6967{
6968	int i;
6969	cpumask_var_t *doms;
6970
6971	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
6972	if (!doms)
6973		return NULL;
6974	for (i = 0; i < ndoms; i++) {
6975		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
6976			free_sched_domains(doms, i);
6977			return NULL;
6978		}
6979	}
6980	return doms;
6981}
6982
6983void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
6984{
6985	unsigned int i;
6986	for (i = 0; i < ndoms; i++)
6987		free_cpumask_var(doms[i]);
6988	kfree(doms);
6989}
6990
6991/*
6992 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
6993 * For now this just excludes isolated cpus, but could be used to
6994 * exclude other special cases in the future.
6995 */
6996static int init_sched_domains(const struct cpumask *cpu_map)
6997{
6998	int err;
6999
7000	arch_update_cpu_topology();
7001	ndoms_cur = 1;
7002	doms_cur = alloc_sched_domains(ndoms_cur);
7003	if (!doms_cur)
7004		doms_cur = &fallback_doms;
7005	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
7006	err = build_sched_domains(doms_cur[0], NULL);
7007	register_sched_domain_sysctl();
7008
7009	return err;
7010}
7011
7012/*
7013 * Detach sched domains from a group of cpus specified in cpu_map
7014 * These cpus will now be attached to the NULL domain
7015 */
7016static void detach_destroy_domains(const struct cpumask *cpu_map)
7017{
7018	int i;
7019
7020	rcu_read_lock();
7021	for_each_cpu(i, cpu_map)
7022		cpu_attach_domain(NULL, &def_root_domain, i);
7023	rcu_read_unlock();
7024}
7025
7026/* handle null as "default" */
7027static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7028			struct sched_domain_attr *new, int idx_new)
7029{
7030	struct sched_domain_attr tmp;
7031
7032	/* fast path */
7033	if (!new && !cur)
7034		return 1;
7035
7036	tmp = SD_ATTR_INIT;
7037	return !memcmp(cur ? (cur + idx_cur) : &tmp,
7038			new ? (new + idx_new) : &tmp,
7039			sizeof(struct sched_domain_attr));
7040}
7041
7042/*
7043 * Partition sched domains as specified by the 'ndoms_new'
7044 * cpumasks in the array doms_new[] of cpumasks. This compares
7045 * doms_new[] to the current sched domain partitioning, doms_cur[].
7046 * It destroys each deleted domain and builds each new domain.
7047 *
7048 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
7049 * The masks don't intersect (don't overlap.) We should setup one
7050 * sched domain for each mask. CPUs not in any of the cpumasks will
7051 * not be load balanced. If the same cpumask appears both in the
7052 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7053 * it as it is.
7054 *
7055 * The passed in 'doms_new' should be allocated using
7056 * alloc_sched_domains.  This routine takes ownership of it and will
7057 * free_sched_domains it when done with it. If the caller failed the
7058 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
7059 * and partition_sched_domains() will fallback to the single partition
7060 * 'fallback_doms', it also forces the domains to be rebuilt.
7061 *
7062 * If doms_new == NULL it will be replaced with cpu_online_mask.
7063 * ndoms_new == 0 is a special case for destroying existing domains,
7064 * and it will not create the default domain.
7065 *
7066 * Call with hotplug lock held
7067 */
7068void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
7069			     struct sched_domain_attr *dattr_new)
7070{
7071	int i, j, n;
7072	int new_topology;
7073
7074	mutex_lock(&sched_domains_mutex);
7075
7076	/* always unregister in case we don't destroy any domains */
7077	unregister_sched_domain_sysctl();
7078
7079	/* Let architecture update cpu core mappings. */
7080	new_topology = arch_update_cpu_topology();
7081
7082	n = doms_new ? ndoms_new : 0;
7083
7084	/* Destroy deleted domains */
7085	for (i = 0; i < ndoms_cur; i++) {
7086		for (j = 0; j < n && !new_topology; j++) {
7087			if (cpumask_equal(doms_cur[i], doms_new[j])
7088			    && dattrs_equal(dattr_cur, i, dattr_new, j))
7089				goto match1;
7090		}
7091		/* no match - a current sched domain not in new doms_new[] */
7092		detach_destroy_domains(doms_cur[i]);
7093match1:
7094		;
7095	}
7096
7097	if (doms_new == NULL) {
7098		ndoms_cur = 0;
7099		doms_new = &fallback_doms;
7100		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
7101		WARN_ON_ONCE(dattr_new);
7102	}
7103
7104	/* Build new domains */
7105	for (i = 0; i < ndoms_new; i++) {
7106		for (j = 0; j < ndoms_cur && !new_topology; j++) {
7107			if (cpumask_equal(doms_new[i], doms_cur[j])
7108			    && dattrs_equal(dattr_new, i, dattr_cur, j))
7109				goto match2;
7110		}
7111		/* no match - add a new doms_new */
7112		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
7113match2:
7114		;
7115	}
7116
7117	/* Remember the new sched domains */
7118	if (doms_cur != &fallback_doms)
7119		free_sched_domains(doms_cur, ndoms_cur);
7120	kfree(dattr_cur);	/* kfree(NULL) is safe */
7121	doms_cur = doms_new;
7122	dattr_cur = dattr_new;
7123	ndoms_cur = ndoms_new;
7124
7125	register_sched_domain_sysctl();
7126
7127	mutex_unlock(&sched_domains_mutex);
7128}
7129
7130static int num_cpus_frozen;	/* used to mark begin/end of suspend/resume */
7131
7132/*
7133 * Update cpusets according to cpu_active mask.  If cpusets are
7134 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7135 * around partition_sched_domains().
7136 *
7137 * If we come here as part of a suspend/resume, don't touch cpusets because we
7138 * want to restore it back to its original state upon resume anyway.
7139 */
7140static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7141			     void *hcpu)
7142{
7143	switch (action) {
7144	case CPU_ONLINE_FROZEN:
7145	case CPU_DOWN_FAILED_FROZEN:
7146
7147		/*
7148		 * num_cpus_frozen tracks how many CPUs are involved in suspend
7149		 * resume sequence. As long as this is not the last online
7150		 * operation in the resume sequence, just build a single sched
7151		 * domain, ignoring cpusets.
7152		 */
7153		num_cpus_frozen--;
7154		if (likely(num_cpus_frozen)) {
7155			partition_sched_domains(1, NULL, NULL);
7156			break;
7157		}
7158
7159		/*
7160		 * This is the last CPU online operation. So fall through and
7161		 * restore the original sched domains by considering the
7162		 * cpuset configurations.
7163		 */
7164
7165	case CPU_ONLINE:
7166	case CPU_DOWN_FAILED:
7167		cpuset_update_active_cpus(true);
7168		break;
7169	default:
7170		return NOTIFY_DONE;
7171	}
7172	return NOTIFY_OK;
7173}
7174
7175static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7176			       void *hcpu)
7177{
7178	switch (action) {
7179	case CPU_DOWN_PREPARE:
7180		cpuset_update_active_cpus(false);
7181		break;
7182	case CPU_DOWN_PREPARE_FROZEN:
7183		num_cpus_frozen++;
7184		partition_sched_domains(1, NULL, NULL);
7185		break;
7186	default:
7187		return NOTIFY_DONE;
7188	}
7189	return NOTIFY_OK;
7190}
7191
7192void __init sched_init_smp(void)
7193{
7194	cpumask_var_t non_isolated_cpus;
7195
7196	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
7197	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
7198
7199	sched_init_numa();
7200
7201	get_online_cpus();
7202	mutex_lock(&sched_domains_mutex);
7203	init_sched_domains(cpu_active_mask);
7204	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7205	if (cpumask_empty(non_isolated_cpus))
7206		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
7207	mutex_unlock(&sched_domains_mutex);
7208	put_online_cpus();
7209
7210	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7211	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
7212
7213	/* RT runtime code needs to handle some hotplug events */
7214	hotcpu_notifier(update_runtime, 0);
7215
7216	init_hrtick();
7217
7218	/* Move init over to a non-isolated CPU */
7219	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
7220		BUG();
7221	sched_init_granularity();
7222	free_cpumask_var(non_isolated_cpus);
7223
7224	init_sched_rt_class();
7225}
7226#else
7227void __init sched_init_smp(void)
7228{
7229	sched_init_granularity();
7230}
7231#endif /* CONFIG_SMP */
7232
7233const_debug unsigned int sysctl_timer_migration = 1;
7234
7235int in_sched_functions(unsigned long addr)
7236{
7237	return in_lock_functions(addr) ||
7238		(addr >= (unsigned long)__sched_text_start
7239		&& addr < (unsigned long)__sched_text_end);
7240}
7241
7242#ifdef CONFIG_CGROUP_SCHED
7243struct task_group root_task_group;
7244LIST_HEAD(task_groups);
7245#endif
7246
7247DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
7248
7249void __init sched_init(void)
7250{
7251	int i, j;
7252	unsigned long alloc_size = 0, ptr;
7253
7254#ifdef CONFIG_FAIR_GROUP_SCHED
7255	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7256#endif
7257#ifdef CONFIG_RT_GROUP_SCHED
7258	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7259#endif
7260#ifdef CONFIG_CPUMASK_OFFSTACK
7261	alloc_size += num_possible_cpus() * cpumask_size();
7262#endif
7263	if (alloc_size) {
7264		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
7265
7266#ifdef CONFIG_FAIR_GROUP_SCHED
7267		root_task_group.se = (struct sched_entity **)ptr;
7268		ptr += nr_cpu_ids * sizeof(void **);
7269
7270		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
7271		ptr += nr_cpu_ids * sizeof(void **);
7272
7273#endif /* CONFIG_FAIR_GROUP_SCHED */
7274#ifdef CONFIG_RT_GROUP_SCHED
7275		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
7276		ptr += nr_cpu_ids * sizeof(void **);
7277
7278		root_task_group.rt_rq = (struct rt_rq **)ptr;
7279		ptr += nr_cpu_ids * sizeof(void **);
7280
7281#endif /* CONFIG_RT_GROUP_SCHED */
7282#ifdef CONFIG_CPUMASK_OFFSTACK
7283		for_each_possible_cpu(i) {
7284			per_cpu(load_balance_tmpmask, i) = (void *)ptr;
7285			ptr += cpumask_size();
7286		}
7287#endif /* CONFIG_CPUMASK_OFFSTACK */
7288	}
7289
7290#ifdef CONFIG_SMP
7291	init_defrootdomain();
7292#endif
7293
7294	init_rt_bandwidth(&def_rt_bandwidth,
7295			global_rt_period(), global_rt_runtime());
7296
7297#ifdef CONFIG_RT_GROUP_SCHED
7298	init_rt_bandwidth(&root_task_group.rt_bandwidth,
7299			global_rt_period(), global_rt_runtime());
7300#endif /* CONFIG_RT_GROUP_SCHED */
7301
7302#ifdef CONFIG_CGROUP_SCHED
7303	list_add(&root_task_group.list, &task_groups);
7304	INIT_LIST_HEAD(&root_task_group.children);
7305	INIT_LIST_HEAD(&root_task_group.siblings);
7306	autogroup_init(&init_task);
7307
7308#endif /* CONFIG_CGROUP_SCHED */
7309
7310#ifdef CONFIG_CGROUP_CPUACCT
7311	root_cpuacct.cpustat = &kernel_cpustat;
7312	root_cpuacct.cpuusage = alloc_percpu(u64);
7313	/* Too early, not expected to fail */
7314	BUG_ON(!root_cpuacct.cpuusage);
7315#endif
7316	for_each_possible_cpu(i) {
7317		struct rq *rq;
7318
7319		rq = cpu_rq(i);
7320		raw_spin_lock_init(&rq->lock);
7321		rq->nr_running = 0;
7322		rq->calc_load_active = 0;
7323		rq->calc_load_update = jiffies + LOAD_FREQ;
7324		init_cfs_rq(&rq->cfs);
7325		init_rt_rq(&rq->rt, rq);
7326#ifdef CONFIG_FAIR_GROUP_SCHED
7327		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
7328		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
7329		/*
7330		 * How much cpu bandwidth does root_task_group get?
7331		 *
7332		 * In case of task-groups formed thr' the cgroup filesystem, it
7333		 * gets 100% of the cpu resources in the system. This overall
7334		 * system cpu resource is divided among the tasks of
7335		 * root_task_group and its child task-groups in a fair manner,
7336		 * based on each entity's (task or task-group's) weight
7337		 * (se->load.weight).
7338		 *
7339		 * In other words, if root_task_group has 10 tasks of weight
7340		 * 1024) and two child groups A0 and A1 (of weight 1024 each),
7341		 * then A0's share of the cpu resource is:
7342		 *
7343		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
7344		 *
7345		 * We achieve this by letting root_task_group's tasks sit
7346		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
7347		 */
7348		init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
7349		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
7350#endif /* CONFIG_FAIR_GROUP_SCHED */
7351
7352		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
7353#ifdef CONFIG_RT_GROUP_SCHED
7354		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
7355		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
7356#endif
7357
7358		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7359			rq->cpu_load[j] = 0;
7360
7361		rq->last_load_update_tick = jiffies;
7362
7363#ifdef CONFIG_SMP
7364		rq->sd = NULL;
7365		rq->rd = NULL;
7366		rq->cpu_power = SCHED_POWER_SCALE;
7367		rq->post_schedule = 0;
7368		rq->active_balance = 0;
7369		rq->next_balance = jiffies;
7370		rq->push_cpu = 0;
7371		rq->cpu = i;
7372		rq->online = 0;
7373		rq->idle_stamp = 0;
7374		rq->avg_idle = 2*sysctl_sched_migration_cost;
7375
7376		INIT_LIST_HEAD(&rq->cfs_tasks);
7377
7378		rq_attach_root(rq, &def_root_domain);
7379#ifdef CONFIG_NO_HZ
7380		rq->nohz_flags = 0;
7381#endif
7382#endif
7383		init_rq_hrtick(rq);
7384		atomic_set(&rq->nr_iowait, 0);
7385	}
7386
7387	set_load_weight(&init_task);
7388
7389#ifdef CONFIG_PREEMPT_NOTIFIERS
7390	INIT_HLIST_HEAD(&init_task.preempt_notifiers);
7391#endif
7392
7393#ifdef CONFIG_RT_MUTEXES
7394	plist_head_init(&init_task.pi_waiters);
7395#endif
7396
7397	/*
7398	 * The boot idle thread does lazy MMU switching as well:
7399	 */
7400	atomic_inc(&init_mm.mm_count);
7401	enter_lazy_tlb(&init_mm, current);
7402
7403	/*
7404	 * Make us the idle thread. Technically, schedule() should not be
7405	 * called from this thread, however somewhere below it might be,
7406	 * but because we are the idle thread, we just pick up running again
7407	 * when this runqueue becomes "idle".
7408	 */
7409	init_idle(current, smp_processor_id());
7410
7411	calc_load_update = jiffies + LOAD_FREQ;
7412
7413	/*
7414	 * During early bootup we pretend to be a normal task:
7415	 */
7416	current->sched_class = &fair_sched_class;
7417
7418#ifdef CONFIG_SMP
7419	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
7420	/* May be allocated at isolcpus cmdline parse time */
7421	if (cpu_isolated_map == NULL)
7422		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
7423	idle_thread_set_boot_cpu();
7424#endif
7425	init_sched_fair_class();
7426
7427	scheduler_running = 1;
7428}
7429
7430#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
7431static inline int preempt_count_equals(int preempt_offset)
7432{
7433	int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
7434
7435	return (nested == preempt_offset);
7436}
7437
7438void __might_sleep(const char *file, int line, int preempt_offset)
7439{
7440	static unsigned long prev_jiffy;	/* ratelimiting */
7441
7442	rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
7443	if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
7444	    system_state != SYSTEM_RUNNING || oops_in_progress)
7445		return;
7446	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7447		return;
7448	prev_jiffy = jiffies;
7449
7450	printk(KERN_ERR
7451		"BUG: sleeping function called from invalid context at %s:%d\n",
7452			file, line);
7453	printk(KERN_ERR
7454		"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
7455			in_atomic(), irqs_disabled(),
7456			current->pid, current->comm);
7457
7458	debug_show_held_locks(current);
7459	if (irqs_disabled())
7460		print_irqtrace_events(current);
7461	dump_stack();
7462}
7463EXPORT_SYMBOL(__might_sleep);
7464#endif
7465
7466#ifdef CONFIG_MAGIC_SYSRQ
7467static void normalize_task(struct rq *rq, struct task_struct *p)
7468{
7469	const struct sched_class *prev_class = p->sched_class;
7470	int old_prio = p->prio;
7471	int on_rq;
7472
7473	on_rq = p->on_rq;
7474	if (on_rq)
7475		dequeue_task(rq, p, 0);
7476	__setscheduler(rq, p, SCHED_NORMAL, 0);
7477	if (on_rq) {
7478		enqueue_task(rq, p, 0);
7479		resched_task(rq->curr);
7480	}
7481
7482	check_class_changed(rq, p, prev_class, old_prio);
7483}
7484
7485void normalize_rt_tasks(void)
7486{
7487	struct task_struct *g, *p;
7488	unsigned long flags;
7489	struct rq *rq;
7490
7491	read_lock_irqsave(&tasklist_lock, flags);
7492	do_each_thread(g, p) {
7493		/*
7494		 * Only normalize user tasks:
7495		 */
7496		if (!p->mm)
7497			continue;
7498
7499		p->se.exec_start		= 0;
7500#ifdef CONFIG_SCHEDSTATS
7501		p->se.statistics.wait_start	= 0;
7502		p->se.statistics.sleep_start	= 0;
7503		p->se.statistics.block_start	= 0;
7504#endif
7505
7506		if (!rt_task(p)) {
7507			/*
7508			 * Renice negative nice level userspace
7509			 * tasks back to 0:
7510			 */
7511			if (TASK_NICE(p) < 0 && p->mm)
7512				set_user_nice(p, 0);
7513			continue;
7514		}
7515
7516		raw_spin_lock(&p->pi_lock);
7517		rq = __task_rq_lock(p);
7518
7519		normalize_task(rq, p);
7520
7521		__task_rq_unlock(rq);
7522		raw_spin_unlock(&p->pi_lock);
7523	} while_each_thread(g, p);
7524
7525	read_unlock_irqrestore(&tasklist_lock, flags);
7526}
7527
7528#endif /* CONFIG_MAGIC_SYSRQ */
7529
7530#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
7531/*
7532 * These functions are only useful for the IA64 MCA handling, or kdb.
7533 *
7534 * They can only be called when the whole system has been
7535 * stopped - every CPU needs to be quiescent, and no scheduling
7536 * activity can take place. Using them for anything else would
7537 * be a serious bug, and as a result, they aren't even visible
7538 * under any other configuration.
7539 */
7540
7541/**
7542 * curr_task - return the current task for a given cpu.
7543 * @cpu: the processor in question.
7544 *
7545 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7546 */
7547struct task_struct *curr_task(int cpu)
7548{
7549	return cpu_curr(cpu);
7550}
7551
7552#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
7553
7554#ifdef CONFIG_IA64
7555/**
7556 * set_curr_task - set the current task for a given cpu.
7557 * @cpu: the processor in question.
7558 * @p: the task pointer to set.
7559 *
7560 * Description: This function must only be used when non-maskable interrupts
7561 * are serviced on a separate stack. It allows the architecture to switch the
7562 * notion of the current task on a cpu in a non-blocking manner. This function
7563 * must be called with all CPU's synchronized, and interrupts disabled, the
7564 * and caller must save the original value of the current task (see
7565 * curr_task() above) and restore that value before reenabling interrupts and
7566 * re-starting the system.
7567 *
7568 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7569 */
7570void set_curr_task(int cpu, struct task_struct *p)
7571{
7572	cpu_curr(cpu) = p;
7573}
7574
7575#endif
7576
7577#ifdef CONFIG_CGROUP_SCHED
7578/* task_group_lock serializes the addition/removal of task groups */
7579static DEFINE_SPINLOCK(task_group_lock);
7580
7581static void free_sched_group(struct task_group *tg)
7582{
7583	free_fair_sched_group(tg);
7584	free_rt_sched_group(tg);
7585	autogroup_free(tg);
7586	kfree(tg);
7587}
7588
7589/* allocate runqueue etc for a new task group */
7590struct task_group *sched_create_group(struct task_group *parent)
7591{
7592	struct task_group *tg;
7593	unsigned long flags;
7594
7595	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
7596	if (!tg)
7597		return ERR_PTR(-ENOMEM);
7598
7599	if (!alloc_fair_sched_group(tg, parent))
7600		goto err;
7601
7602	if (!alloc_rt_sched_group(tg, parent))
7603		goto err;
7604
7605	spin_lock_irqsave(&task_group_lock, flags);
7606	list_add_rcu(&tg->list, &task_groups);
7607
7608	WARN_ON(!parent); /* root should already exist */
7609
7610	tg->parent = parent;
7611	INIT_LIST_HEAD(&tg->children);
7612	list_add_rcu(&tg->siblings, &parent->children);
7613	spin_unlock_irqrestore(&task_group_lock, flags);
7614
7615	return tg;
7616
7617err:
7618	free_sched_group(tg);
7619	return ERR_PTR(-ENOMEM);
7620}
7621
7622/* rcu callback to free various structures associated with a task group */
7623static void free_sched_group_rcu(struct rcu_head *rhp)
7624{
7625	/* now it should be safe to free those cfs_rqs */
7626	free_sched_group(container_of(rhp, struct task_group, rcu));
7627}
7628
7629/* Destroy runqueue etc associated with a task group */
7630void sched_destroy_group(struct task_group *tg)
7631{
7632	unsigned long flags;
7633	int i;
7634
7635	/* end participation in shares distribution */
7636	for_each_possible_cpu(i)
7637		unregister_fair_sched_group(tg, i);
7638
7639	spin_lock_irqsave(&task_group_lock, flags);
7640	list_del_rcu(&tg->list);
7641	list_del_rcu(&tg->siblings);
7642	spin_unlock_irqrestore(&task_group_lock, flags);
7643
7644	/* wait for possible concurrent references to cfs_rqs complete */
7645	call_rcu(&tg->rcu, free_sched_group_rcu);
7646}
7647
7648/* change task's runqueue when it moves between groups.
7649 *	The caller of this function should have put the task in its new group
7650 *	by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
7651 *	reflect its new group.
7652 */
7653void sched_move_task(struct task_struct *tsk)
7654{
7655	struct task_group *tg;
7656	int on_rq, running;
7657	unsigned long flags;
7658	struct rq *rq;
7659
7660	rq = task_rq_lock(tsk, &flags);
7661
7662	running = task_current(rq, tsk);
7663	on_rq = tsk->on_rq;
7664
7665	if (on_rq)
7666		dequeue_task(rq, tsk, 0);
7667	if (unlikely(running))
7668		tsk->sched_class->put_prev_task(rq, tsk);
7669
7670	tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
7671				lockdep_is_held(&tsk->sighand->siglock)),
7672			  struct task_group, css);
7673	tg = autogroup_task_group(tsk, tg);
7674	tsk->sched_task_group = tg;
7675
7676#ifdef CONFIG_FAIR_GROUP_SCHED
7677	if (tsk->sched_class->task_move_group)
7678		tsk->sched_class->task_move_group(tsk, on_rq);
7679	else
7680#endif
7681		set_task_rq(tsk, task_cpu(tsk));
7682
7683	if (unlikely(running))
7684		tsk->sched_class->set_curr_task(rq);
7685	if (on_rq)
7686		enqueue_task(rq, tsk, 0);
7687
7688	task_rq_unlock(rq, tsk, &flags);
7689}
7690#endif /* CONFIG_CGROUP_SCHED */
7691
7692#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
7693static unsigned long to_ratio(u64 period, u64 runtime)
7694{
7695	if (runtime == RUNTIME_INF)
7696		return 1ULL << 20;
7697
7698	return div64_u64(runtime << 20, period);
7699}
7700#endif
7701
7702#ifdef CONFIG_RT_GROUP_SCHED
7703/*
7704 * Ensure that the real time constraints are schedulable.
7705 */
7706static DEFINE_MUTEX(rt_constraints_mutex);
7707
7708/* Must be called with tasklist_lock held */
7709static inline int tg_has_rt_tasks(struct task_group *tg)
7710{
7711	struct task_struct *g, *p;
7712
7713	do_each_thread(g, p) {
7714		if (rt_task(p) && task_rq(p)->rt.tg == tg)
7715			return 1;
7716	} while_each_thread(g, p);
7717
7718	return 0;
7719}
7720
7721struct rt_schedulable_data {
7722	struct task_group *tg;
7723	u64 rt_period;
7724	u64 rt_runtime;
7725};
7726
7727static int tg_rt_schedulable(struct task_group *tg, void *data)
7728{
7729	struct rt_schedulable_data *d = data;
7730	struct task_group *child;
7731	unsigned long total, sum = 0;
7732	u64 period, runtime;
7733
7734	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7735	runtime = tg->rt_bandwidth.rt_runtime;
7736
7737	if (tg == d->tg) {
7738		period = d->rt_period;
7739		runtime = d->rt_runtime;
7740	}
7741
7742	/*
7743	 * Cannot have more runtime than the period.
7744	 */
7745	if (runtime > period && runtime != RUNTIME_INF)
7746		return -EINVAL;
7747
7748	/*
7749	 * Ensure we don't starve existing RT tasks.
7750	 */
7751	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
7752		return -EBUSY;
7753
7754	total = to_ratio(period, runtime);
7755
7756	/*
7757	 * Nobody can have more than the global setting allows.
7758	 */
7759	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
7760		return -EINVAL;
7761
7762	/*
7763	 * The sum of our children's runtime should not exceed our own.
7764	 */
7765	list_for_each_entry_rcu(child, &tg->children, siblings) {
7766		period = ktime_to_ns(child->rt_bandwidth.rt_period);
7767		runtime = child->rt_bandwidth.rt_runtime;
7768
7769		if (child == d->tg) {
7770			period = d->rt_period;
7771			runtime = d->rt_runtime;
7772		}
7773
7774		sum += to_ratio(period, runtime);
7775	}
7776
7777	if (sum > total)
7778		return -EINVAL;
7779
7780	return 0;
7781}
7782
7783static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
7784{
7785	int ret;
7786
7787	struct rt_schedulable_data data = {
7788		.tg = tg,
7789		.rt_period = period,
7790		.rt_runtime = runtime,
7791	};
7792
7793	rcu_read_lock();
7794	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
7795	rcu_read_unlock();
7796
7797	return ret;
7798}
7799
7800static int tg_set_rt_bandwidth(struct task_group *tg,
7801		u64 rt_period, u64 rt_runtime)
7802{
7803	int i, err = 0;
7804
7805	mutex_lock(&rt_constraints_mutex);
7806	read_lock(&tasklist_lock);
7807	err = __rt_schedulable(tg, rt_period, rt_runtime);
7808	if (err)
7809		goto unlock;
7810
7811	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
7812	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
7813	tg->rt_bandwidth.rt_runtime = rt_runtime;
7814
7815	for_each_possible_cpu(i) {
7816		struct rt_rq *rt_rq = tg->rt_rq[i];
7817
7818		raw_spin_lock(&rt_rq->rt_runtime_lock);
7819		rt_rq->rt_runtime = rt_runtime;
7820		raw_spin_unlock(&rt_rq->rt_runtime_lock);
7821	}
7822	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
7823unlock:
7824	read_unlock(&tasklist_lock);
7825	mutex_unlock(&rt_constraints_mutex);
7826
7827	return err;
7828}
7829
7830int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
7831{
7832	u64 rt_runtime, rt_period;
7833
7834	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7835	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
7836	if (rt_runtime_us < 0)
7837		rt_runtime = RUNTIME_INF;
7838
7839	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
7840}
7841
7842long sched_group_rt_runtime(struct task_group *tg)
7843{
7844	u64 rt_runtime_us;
7845
7846	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
7847		return -1;
7848
7849	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
7850	do_div(rt_runtime_us, NSEC_PER_USEC);
7851	return rt_runtime_us;
7852}
7853
7854int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
7855{
7856	u64 rt_runtime, rt_period;
7857
7858	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
7859	rt_runtime = tg->rt_bandwidth.rt_runtime;
7860
7861	if (rt_period == 0)
7862		return -EINVAL;
7863
7864	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
7865}
7866
7867long sched_group_rt_period(struct task_group *tg)
7868{
7869	u64 rt_period_us;
7870
7871	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
7872	do_div(rt_period_us, NSEC_PER_USEC);
7873	return rt_period_us;
7874}
7875
7876static int sched_rt_global_constraints(void)
7877{
7878	u64 runtime, period;
7879	int ret = 0;
7880
7881	if (sysctl_sched_rt_period <= 0)
7882		return -EINVAL;
7883
7884	runtime = global_rt_runtime();
7885	period = global_rt_period();
7886
7887	/*
7888	 * Sanity check on the sysctl variables.
7889	 */
7890	if (runtime > period && runtime != RUNTIME_INF)
7891		return -EINVAL;
7892
7893	mutex_lock(&rt_constraints_mutex);
7894	read_lock(&tasklist_lock);
7895	ret = __rt_schedulable(NULL, 0, 0);
7896	read_unlock(&tasklist_lock);
7897	mutex_unlock(&rt_constraints_mutex);
7898
7899	return ret;
7900}
7901
7902int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
7903{
7904	/* Don't accept realtime tasks when there is no way for them to run */
7905	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
7906		return 0;
7907
7908	return 1;
7909}
7910
7911#else /* !CONFIG_RT_GROUP_SCHED */
7912static int sched_rt_global_constraints(void)
7913{
7914	unsigned long flags;
7915	int i;
7916
7917	if (sysctl_sched_rt_period <= 0)
7918		return -EINVAL;
7919
7920	/*
7921	 * There's always some RT tasks in the root group
7922	 * -- migration, kstopmachine etc..
7923	 */
7924	if (sysctl_sched_rt_runtime == 0)
7925		return -EBUSY;
7926
7927	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
7928	for_each_possible_cpu(i) {
7929		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
7930
7931		raw_spin_lock(&rt_rq->rt_runtime_lock);
7932		rt_rq->rt_runtime = global_rt_runtime();
7933		raw_spin_unlock(&rt_rq->rt_runtime_lock);
7934	}
7935	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
7936
7937	return 0;
7938}
7939#endif /* CONFIG_RT_GROUP_SCHED */
7940
7941int sched_rt_handler(struct ctl_table *table, int write,
7942		void __user *buffer, size_t *lenp,
7943		loff_t *ppos)
7944{
7945	int ret;
7946	int old_period, old_runtime;
7947	static DEFINE_MUTEX(mutex);
7948
7949	mutex_lock(&mutex);
7950	old_period = sysctl_sched_rt_period;
7951	old_runtime = sysctl_sched_rt_runtime;
7952
7953	ret = proc_dointvec(table, write, buffer, lenp, ppos);
7954
7955	if (!ret && write) {
7956		ret = sched_rt_global_constraints();
7957		if (ret) {
7958			sysctl_sched_rt_period = old_period;
7959			sysctl_sched_rt_runtime = old_runtime;
7960		} else {
7961			def_rt_bandwidth.rt_runtime = global_rt_runtime();
7962			def_rt_bandwidth.rt_period =
7963				ns_to_ktime(global_rt_period());
7964		}
7965	}
7966	mutex_unlock(&mutex);
7967
7968	return ret;
7969}
7970
7971#ifdef CONFIG_CGROUP_SCHED
7972
7973/* return corresponding task_group object of a cgroup */
7974static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
7975{
7976	return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
7977			    struct task_group, css);
7978}
7979
7980static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
7981{
7982	struct task_group *tg, *parent;
7983
7984	if (!cgrp->parent) {
7985		/* This is early initialization for the top cgroup */
7986		return &root_task_group.css;
7987	}
7988
7989	parent = cgroup_tg(cgrp->parent);
7990	tg = sched_create_group(parent);
7991	if (IS_ERR(tg))
7992		return ERR_PTR(-ENOMEM);
7993
7994	return &tg->css;
7995}
7996
7997static void cpu_cgroup_destroy(struct cgroup *cgrp)
7998{
7999	struct task_group *tg = cgroup_tg(cgrp);
8000
8001	sched_destroy_group(tg);
8002}
8003
8004static int cpu_cgroup_can_attach(struct cgroup *cgrp,
8005				 struct cgroup_taskset *tset)
8006{
8007	struct task_struct *task;
8008
8009	cgroup_taskset_for_each(task, cgrp, tset) {
8010#ifdef CONFIG_RT_GROUP_SCHED
8011		if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
8012			return -EINVAL;
8013#else
8014		/* We don't support RT-tasks being in separate groups */
8015		if (task->sched_class != &fair_sched_class)
8016			return -EINVAL;
8017#endif
8018	}
8019	return 0;
8020}
8021
8022static void cpu_cgroup_attach(struct cgroup *cgrp,
8023			      struct cgroup_taskset *tset)
8024{
8025	struct task_struct *task;
8026
8027	cgroup_taskset_for_each(task, cgrp, tset)
8028		sched_move_task(task);
8029}
8030
8031static void
8032cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
8033		struct task_struct *task)
8034{
8035	/*
8036	 * cgroup_exit() is called in the copy_process() failure path.
8037	 * Ignore this case since the task hasn't ran yet, this avoids
8038	 * trying to poke a half freed task state from generic code.
8039	 */
8040	if (!(task->flags & PF_EXITING))
8041		return;
8042
8043	sched_move_task(task);
8044}
8045
8046#ifdef CONFIG_FAIR_GROUP_SCHED
8047static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
8048				u64 shareval)
8049{
8050	return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
8051}
8052
8053static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
8054{
8055	struct task_group *tg = cgroup_tg(cgrp);
8056
8057	return (u64) scale_load_down(tg->shares);
8058}
8059
8060#ifdef CONFIG_CFS_BANDWIDTH
8061static DEFINE_MUTEX(cfs_constraints_mutex);
8062
8063const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
8064const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
8065
8066static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8067
8068static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
8069{
8070	int i, ret = 0, runtime_enabled, runtime_was_enabled;
8071	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8072
8073	if (tg == &root_task_group)
8074		return -EINVAL;
8075
8076	/*
8077	 * Ensure we have at some amount of bandwidth every period.  This is
8078	 * to prevent reaching a state of large arrears when throttled via
8079	 * entity_tick() resulting in prolonged exit starvation.
8080	 */
8081	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
8082		return -EINVAL;
8083
8084	/*
8085	 * Likewise, bound things on the otherside by preventing insane quota
8086	 * periods.  This also allows us to normalize in computing quota
8087	 * feasibility.
8088	 */
8089	if (period > max_cfs_quota_period)
8090		return -EINVAL;
8091
8092	mutex_lock(&cfs_constraints_mutex);
8093	ret = __cfs_schedulable(tg, period, quota);
8094	if (ret)
8095		goto out_unlock;
8096
8097	runtime_enabled = quota != RUNTIME_INF;
8098	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
8099	account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
8100	raw_spin_lock_irq(&cfs_b->lock);
8101	cfs_b->period = ns_to_ktime(period);
8102	cfs_b->quota = quota;
8103
8104	__refill_cfs_bandwidth_runtime(cfs_b);
8105	/* restart the period timer (if active) to handle new period expiry */
8106	if (runtime_enabled && cfs_b->timer_active) {
8107		/* force a reprogram */
8108		cfs_b->timer_active = 0;
8109		__start_cfs_bandwidth(cfs_b);
8110	}
8111	raw_spin_unlock_irq(&cfs_b->lock);
8112
8113	for_each_possible_cpu(i) {
8114		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
8115		struct rq *rq = cfs_rq->rq;
8116
8117		raw_spin_lock_irq(&rq->lock);
8118		cfs_rq->runtime_enabled = runtime_enabled;
8119		cfs_rq->runtime_remaining = 0;
8120
8121		if (cfs_rq->throttled)
8122			unthrottle_cfs_rq(cfs_rq);
8123		raw_spin_unlock_irq(&rq->lock);
8124	}
8125out_unlock:
8126	mutex_unlock(&cfs_constraints_mutex);
8127
8128	return ret;
8129}
8130
8131int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
8132{
8133	u64 quota, period;
8134
8135	period = ktime_to_ns(tg->cfs_bandwidth.period);
8136	if (cfs_quota_us < 0)
8137		quota = RUNTIME_INF;
8138	else
8139		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
8140
8141	return tg_set_cfs_bandwidth(tg, period, quota);
8142}
8143
8144long tg_get_cfs_quota(struct task_group *tg)
8145{
8146	u64 quota_us;
8147
8148	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
8149		return -1;
8150
8151	quota_us = tg->cfs_bandwidth.quota;
8152	do_div(quota_us, NSEC_PER_USEC);
8153
8154	return quota_us;
8155}
8156
8157int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
8158{
8159	u64 quota, period;
8160
8161	period = (u64)cfs_period_us * NSEC_PER_USEC;
8162	quota = tg->cfs_bandwidth.quota;
8163
8164	return tg_set_cfs_bandwidth(tg, period, quota);
8165}
8166
8167long tg_get_cfs_period(struct task_group *tg)
8168{
8169	u64 cfs_period_us;
8170
8171	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
8172	do_div(cfs_period_us, NSEC_PER_USEC);
8173
8174	return cfs_period_us;
8175}
8176
8177static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
8178{
8179	return tg_get_cfs_quota(cgroup_tg(cgrp));
8180}
8181
8182static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
8183				s64 cfs_quota_us)
8184{
8185	return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
8186}
8187
8188static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
8189{
8190	return tg_get_cfs_period(cgroup_tg(cgrp));
8191}
8192
8193static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
8194				u64 cfs_period_us)
8195{
8196	return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
8197}
8198
8199struct cfs_schedulable_data {
8200	struct task_group *tg;
8201	u64 period, quota;
8202};
8203
8204/*
8205 * normalize group quota/period to be quota/max_period
8206 * note: units are usecs
8207 */
8208static u64 normalize_cfs_quota(struct task_group *tg,
8209			       struct cfs_schedulable_data *d)
8210{
8211	u64 quota, period;
8212
8213	if (tg == d->tg) {
8214		period = d->period;
8215		quota = d->quota;
8216	} else {
8217		period = tg_get_cfs_period(tg);
8218		quota = tg_get_cfs_quota(tg);
8219	}
8220
8221	/* note: these should typically be equivalent */
8222	if (quota == RUNTIME_INF || quota == -1)
8223		return RUNTIME_INF;
8224
8225	return to_ratio(period, quota);
8226}
8227
8228static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
8229{
8230	struct cfs_schedulable_data *d = data;
8231	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8232	s64 quota = 0, parent_quota = -1;
8233
8234	if (!tg->parent) {
8235		quota = RUNTIME_INF;
8236	} else {
8237		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
8238
8239		quota = normalize_cfs_quota(tg, d);
8240		parent_quota = parent_b->hierarchal_quota;
8241
8242		/*
8243		 * ensure max(child_quota) <= parent_quota, inherit when no
8244		 * limit is set
8245		 */
8246		if (quota == RUNTIME_INF)
8247			quota = parent_quota;
8248		else if (parent_quota != RUNTIME_INF && quota > parent_quota)
8249			return -EINVAL;
8250	}
8251	cfs_b->hierarchal_quota = quota;
8252
8253	return 0;
8254}
8255
8256static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
8257{
8258	int ret;
8259	struct cfs_schedulable_data data = {
8260		.tg = tg,
8261		.period = period,
8262		.quota = quota,
8263	};
8264
8265	if (quota != RUNTIME_INF) {
8266		do_div(data.period, NSEC_PER_USEC);
8267		do_div(data.quota, NSEC_PER_USEC);
8268	}
8269
8270	rcu_read_lock();
8271	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
8272	rcu_read_unlock();
8273
8274	return ret;
8275}
8276
8277static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
8278		struct cgroup_map_cb *cb)
8279{
8280	struct task_group *tg = cgroup_tg(cgrp);
8281	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8282
8283	cb->fill(cb, "nr_periods", cfs_b->nr_periods);
8284	cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
8285	cb->fill(cb, "throttled_time", cfs_b->throttled_time);
8286
8287	return 0;
8288}
8289#endif /* CONFIG_CFS_BANDWIDTH */
8290#endif /* CONFIG_FAIR_GROUP_SCHED */
8291
8292#ifdef CONFIG_RT_GROUP_SCHED
8293static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
8294				s64 val)
8295{
8296	return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
8297}
8298
8299static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
8300{
8301	return sched_group_rt_runtime(cgroup_tg(cgrp));
8302}
8303
8304static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
8305		u64 rt_period_us)
8306{
8307	return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
8308}
8309
8310static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
8311{
8312	return sched_group_rt_period(cgroup_tg(cgrp));
8313}
8314#endif /* CONFIG_RT_GROUP_SCHED */
8315
8316static struct cftype cpu_files[] = {
8317#ifdef CONFIG_FAIR_GROUP_SCHED
8318	{
8319		.name = "shares",
8320		.read_u64 = cpu_shares_read_u64,
8321		.write_u64 = cpu_shares_write_u64,
8322	},
8323#endif
8324#ifdef CONFIG_CFS_BANDWIDTH
8325	{
8326		.name = "cfs_quota_us",
8327		.read_s64 = cpu_cfs_quota_read_s64,
8328		.write_s64 = cpu_cfs_quota_write_s64,
8329	},
8330	{
8331		.name = "cfs_period_us",
8332		.read_u64 = cpu_cfs_period_read_u64,
8333		.write_u64 = cpu_cfs_period_write_u64,
8334	},
8335	{
8336		.name = "stat",
8337		.read_map = cpu_stats_show,
8338	},
8339#endif
8340#ifdef CONFIG_RT_GROUP_SCHED
8341	{
8342		.name = "rt_runtime_us",
8343		.read_s64 = cpu_rt_runtime_read,
8344		.write_s64 = cpu_rt_runtime_write,
8345	},
8346	{
8347		.name = "rt_period_us",
8348		.read_u64 = cpu_rt_period_read_uint,
8349		.write_u64 = cpu_rt_period_write_uint,
8350	},
8351#endif
8352	{ }	/* terminate */
8353};
8354
8355struct cgroup_subsys cpu_cgroup_subsys = {
8356	.name		= "cpu",
8357	.create		= cpu_cgroup_create,
8358	.destroy	= cpu_cgroup_destroy,
8359	.can_attach	= cpu_cgroup_can_attach,
8360	.attach		= cpu_cgroup_attach,
8361	.exit		= cpu_cgroup_exit,
8362	.subsys_id	= cpu_cgroup_subsys_id,
8363	.base_cftypes	= cpu_files,
8364	.early_init	= 1,
8365};
8366
8367#endif	/* CONFIG_CGROUP_SCHED */
8368
8369#ifdef CONFIG_CGROUP_CPUACCT
8370
8371/*
8372 * CPU accounting code for task groups.
8373 *
8374 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
8375 * (balbir@in.ibm.com).
8376 */
8377
8378/* create a new cpu accounting group */
8379static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp)
8380{
8381	struct cpuacct *ca;
8382
8383	if (!cgrp->parent)
8384		return &root_cpuacct.css;
8385
8386	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
8387	if (!ca)
8388		goto out;
8389
8390	ca->cpuusage = alloc_percpu(u64);
8391	if (!ca->cpuusage)
8392		goto out_free_ca;
8393
8394	ca->cpustat = alloc_percpu(struct kernel_cpustat);
8395	if (!ca->cpustat)
8396		goto out_free_cpuusage;
8397
8398	return &ca->css;
8399
8400out_free_cpuusage:
8401	free_percpu(ca->cpuusage);
8402out_free_ca:
8403	kfree(ca);
8404out:
8405	return ERR_PTR(-ENOMEM);
8406}
8407
8408/* destroy an existing cpu accounting group */
8409static void cpuacct_destroy(struct cgroup *cgrp)
8410{
8411	struct cpuacct *ca = cgroup_ca(cgrp);
8412
8413	free_percpu(ca->cpustat);
8414	free_percpu(ca->cpuusage);
8415	kfree(ca);
8416}
8417
8418static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
8419{
8420	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
8421	u64 data;
8422
8423#ifndef CONFIG_64BIT
8424	/*
8425	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
8426	 */
8427	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
8428	data = *cpuusage;
8429	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
8430#else
8431	data = *cpuusage;
8432#endif
8433
8434	return data;
8435}
8436
8437static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
8438{
8439	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
8440
8441#ifndef CONFIG_64BIT
8442	/*
8443	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
8444	 */
8445	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
8446	*cpuusage = val;
8447	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
8448#else
8449	*cpuusage = val;
8450#endif
8451}
8452
8453/* return total cpu usage (in nanoseconds) of a group */
8454static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
8455{
8456	struct cpuacct *ca = cgroup_ca(cgrp);
8457	u64 totalcpuusage = 0;
8458	int i;
8459
8460	for_each_present_cpu(i)
8461		totalcpuusage += cpuacct_cpuusage_read(ca, i);
8462
8463	return totalcpuusage;
8464}
8465
8466static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
8467								u64 reset)
8468{
8469	struct cpuacct *ca = cgroup_ca(cgrp);
8470	int err = 0;
8471	int i;
8472
8473	if (reset) {
8474		err = -EINVAL;
8475		goto out;
8476	}
8477
8478	for_each_present_cpu(i)
8479		cpuacct_cpuusage_write(ca, i, 0);
8480
8481out:
8482	return err;
8483}
8484
8485static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
8486				   struct seq_file *m)
8487{
8488	struct cpuacct *ca = cgroup_ca(cgroup);
8489	u64 percpu;
8490	int i;
8491
8492	for_each_present_cpu(i) {
8493		percpu = cpuacct_cpuusage_read(ca, i);
8494		seq_printf(m, "%llu ", (unsigned long long) percpu);
8495	}
8496	seq_printf(m, "\n");
8497	return 0;
8498}
8499
8500static const char *cpuacct_stat_desc[] = {
8501	[CPUACCT_STAT_USER] = "user",
8502	[CPUACCT_STAT_SYSTEM] = "system",
8503};
8504
8505static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
8506			      struct cgroup_map_cb *cb)
8507{
8508	struct cpuacct *ca = cgroup_ca(cgrp);
8509	int cpu;
8510	s64 val = 0;
8511
8512	for_each_online_cpu(cpu) {
8513		struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
8514		val += kcpustat->cpustat[CPUTIME_USER];
8515		val += kcpustat->cpustat[CPUTIME_NICE];
8516	}
8517	val = cputime64_to_clock_t(val);
8518	cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_USER], val);
8519
8520	val = 0;
8521	for_each_online_cpu(cpu) {
8522		struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
8523		val += kcpustat->cpustat[CPUTIME_SYSTEM];
8524		val += kcpustat->cpustat[CPUTIME_IRQ];
8525		val += kcpustat->cpustat[CPUTIME_SOFTIRQ];
8526	}
8527
8528	val = cputime64_to_clock_t(val);
8529	cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val);
8530
8531	return 0;
8532}
8533
8534static struct cftype files[] = {
8535	{
8536		.name = "usage",
8537		.read_u64 = cpuusage_read,
8538		.write_u64 = cpuusage_write,
8539	},
8540	{
8541		.name = "usage_percpu",
8542		.read_seq_string = cpuacct_percpu_seq_read,
8543	},
8544	{
8545		.name = "stat",
8546		.read_map = cpuacct_stats_show,
8547	},
8548	{ }	/* terminate */
8549};
8550
8551/*
8552 * charge this task's execution time to its accounting group.
8553 *
8554 * called with rq->lock held.
8555 */
8556void cpuacct_charge(struct task_struct *tsk, u64 cputime)
8557{
8558	struct cpuacct *ca;
8559	int cpu;
8560
8561	if (unlikely(!cpuacct_subsys.active))
8562		return;
8563
8564	cpu = task_cpu(tsk);
8565
8566	rcu_read_lock();
8567
8568	ca = task_ca(tsk);
8569
8570	for (; ca; ca = parent_ca(ca)) {
8571		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
8572		*cpuusage += cputime;
8573	}
8574
8575	rcu_read_unlock();
8576}
8577
8578struct cgroup_subsys cpuacct_subsys = {
8579	.name = "cpuacct",
8580	.create = cpuacct_create,
8581	.destroy = cpuacct_destroy,
8582	.subsys_id = cpuacct_subsys_id,
8583	.base_cftypes = files,
8584};
8585#endif	/* CONFIG_CGROUP_CPUACCT */
8586