core.c revision 4314895165623879937f46d767673654662b570c
1/*
2 *  kernel/sched/core.c
3 *
4 *  Kernel scheduler and related syscalls
5 *
6 *  Copyright (C) 1991-2002  Linus Torvalds
7 *
8 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
9 *		make semaphores SMP safe
10 *  1998-11-19	Implemented schedule_timeout() and related stuff
11 *		by Andrea Arcangeli
12 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
13 *		hybrid priority-list and round-robin design with
14 *		an array-switch method of distributing timeslices
15 *		and per-CPU runqueues.  Cleanups and useful suggestions
16 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
17 *  2003-09-03	Interactivity tuning by Con Kolivas.
18 *  2004-04-02	Scheduler domains code by Nick Piggin
19 *  2007-04-15  Work begun on replacing all interactivity tuning with a
20 *              fair scheduling design by Con Kolivas.
21 *  2007-05-05  Load balancing (smp-nice) and other improvements
22 *              by Peter Williams
23 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
24 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
25 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 *              Thomas Gleixner, Mike Kravetz
27 */
28
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/nmi.h>
32#include <linux/init.h>
33#include <linux/uaccess.h>
34#include <linux/highmem.h>
35#include <asm/mmu_context.h>
36#include <linux/interrupt.h>
37#include <linux/capability.h>
38#include <linux/completion.h>
39#include <linux/kernel_stat.h>
40#include <linux/debug_locks.h>
41#include <linux/perf_event.h>
42#include <linux/security.h>
43#include <linux/notifier.h>
44#include <linux/profile.h>
45#include <linux/freezer.h>
46#include <linux/vmalloc.h>
47#include <linux/blkdev.h>
48#include <linux/delay.h>
49#include <linux/pid_namespace.h>
50#include <linux/smp.h>
51#include <linux/threads.h>
52#include <linux/timer.h>
53#include <linux/rcupdate.h>
54#include <linux/cpu.h>
55#include <linux/cpuset.h>
56#include <linux/percpu.h>
57#include <linux/proc_fs.h>
58#include <linux/seq_file.h>
59#include <linux/sysctl.h>
60#include <linux/syscalls.h>
61#include <linux/times.h>
62#include <linux/tsacct_kern.h>
63#include <linux/kprobes.h>
64#include <linux/delayacct.h>
65#include <linux/unistd.h>
66#include <linux/pagemap.h>
67#include <linux/hrtimer.h>
68#include <linux/tick.h>
69#include <linux/debugfs.h>
70#include <linux/ctype.h>
71#include <linux/ftrace.h>
72#include <linux/slab.h>
73#include <linux/init_task.h>
74#include <linux/binfmts.h>
75#include <linux/context_tracking.h>
76
77#include <asm/switch_to.h>
78#include <asm/tlb.h>
79#include <asm/irq_regs.h>
80#include <asm/mutex.h>
81#ifdef CONFIG_PARAVIRT
82#include <asm/paravirt.h>
83#endif
84
85#include "sched.h"
86#include "../workqueue_internal.h"
87#include "../smpboot.h"
88
89#define CREATE_TRACE_POINTS
90#include <trace/events/sched.h>
91
92void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
93{
94	unsigned long delta;
95	ktime_t soft, hard, now;
96
97	for (;;) {
98		if (hrtimer_active(period_timer))
99			break;
100
101		now = hrtimer_cb_get_time(period_timer);
102		hrtimer_forward(period_timer, now, period);
103
104		soft = hrtimer_get_softexpires(period_timer);
105		hard = hrtimer_get_expires(period_timer);
106		delta = ktime_to_ns(ktime_sub(hard, soft));
107		__hrtimer_start_range_ns(period_timer, soft, delta,
108					 HRTIMER_MODE_ABS_PINNED, 0);
109	}
110}
111
112DEFINE_MUTEX(sched_domains_mutex);
113DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
114
115static void update_rq_clock_task(struct rq *rq, s64 delta);
116
117void update_rq_clock(struct rq *rq)
118{
119	s64 delta;
120
121	if (rq->skip_clock_update > 0)
122		return;
123
124	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
125	rq->clock += delta;
126	update_rq_clock_task(rq, delta);
127}
128
129/*
130 * Debugging: various feature bits
131 */
132
133#define SCHED_FEAT(name, enabled)	\
134	(1UL << __SCHED_FEAT_##name) * enabled |
135
136const_debug unsigned int sysctl_sched_features =
137#include "features.h"
138	0;
139
140#undef SCHED_FEAT
141
142#ifdef CONFIG_SCHED_DEBUG
143#define SCHED_FEAT(name, enabled)	\
144	#name ,
145
146static const char * const sched_feat_names[] = {
147#include "features.h"
148};
149
150#undef SCHED_FEAT
151
152static int sched_feat_show(struct seq_file *m, void *v)
153{
154	int i;
155
156	for (i = 0; i < __SCHED_FEAT_NR; i++) {
157		if (!(sysctl_sched_features & (1UL << i)))
158			seq_puts(m, "NO_");
159		seq_printf(m, "%s ", sched_feat_names[i]);
160	}
161	seq_puts(m, "\n");
162
163	return 0;
164}
165
166#ifdef HAVE_JUMP_LABEL
167
168#define jump_label_key__true  STATIC_KEY_INIT_TRUE
169#define jump_label_key__false STATIC_KEY_INIT_FALSE
170
171#define SCHED_FEAT(name, enabled)	\
172	jump_label_key__##enabled ,
173
174struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
175#include "features.h"
176};
177
178#undef SCHED_FEAT
179
180static void sched_feat_disable(int i)
181{
182	if (static_key_enabled(&sched_feat_keys[i]))
183		static_key_slow_dec(&sched_feat_keys[i]);
184}
185
186static void sched_feat_enable(int i)
187{
188	if (!static_key_enabled(&sched_feat_keys[i]))
189		static_key_slow_inc(&sched_feat_keys[i]);
190}
191#else
192static void sched_feat_disable(int i) { };
193static void sched_feat_enable(int i) { };
194#endif /* HAVE_JUMP_LABEL */
195
196static int sched_feat_set(char *cmp)
197{
198	int i;
199	int neg = 0;
200
201	if (strncmp(cmp, "NO_", 3) == 0) {
202		neg = 1;
203		cmp += 3;
204	}
205
206	for (i = 0; i < __SCHED_FEAT_NR; i++) {
207		if (strcmp(cmp, sched_feat_names[i]) == 0) {
208			if (neg) {
209				sysctl_sched_features &= ~(1UL << i);
210				sched_feat_disable(i);
211			} else {
212				sysctl_sched_features |= (1UL << i);
213				sched_feat_enable(i);
214			}
215			break;
216		}
217	}
218
219	return i;
220}
221
222static ssize_t
223sched_feat_write(struct file *filp, const char __user *ubuf,
224		size_t cnt, loff_t *ppos)
225{
226	char buf[64];
227	char *cmp;
228	int i;
229
230	if (cnt > 63)
231		cnt = 63;
232
233	if (copy_from_user(&buf, ubuf, cnt))
234		return -EFAULT;
235
236	buf[cnt] = 0;
237	cmp = strstrip(buf);
238
239	i = sched_feat_set(cmp);
240	if (i == __SCHED_FEAT_NR)
241		return -EINVAL;
242
243	*ppos += cnt;
244
245	return cnt;
246}
247
248static int sched_feat_open(struct inode *inode, struct file *filp)
249{
250	return single_open(filp, sched_feat_show, NULL);
251}
252
253static const struct file_operations sched_feat_fops = {
254	.open		= sched_feat_open,
255	.write		= sched_feat_write,
256	.read		= seq_read,
257	.llseek		= seq_lseek,
258	.release	= single_release,
259};
260
261static __init int sched_init_debug(void)
262{
263	debugfs_create_file("sched_features", 0644, NULL, NULL,
264			&sched_feat_fops);
265
266	return 0;
267}
268late_initcall(sched_init_debug);
269#endif /* CONFIG_SCHED_DEBUG */
270
271/*
272 * Number of tasks to iterate in a single balance run.
273 * Limited because this is done with IRQs disabled.
274 */
275const_debug unsigned int sysctl_sched_nr_migrate = 32;
276
277/*
278 * period over which we average the RT time consumption, measured
279 * in ms.
280 *
281 * default: 1s
282 */
283const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
284
285/*
286 * period over which we measure -rt task cpu usage in us.
287 * default: 1s
288 */
289unsigned int sysctl_sched_rt_period = 1000000;
290
291__read_mostly int scheduler_running;
292
293/*
294 * part of the period that we allow rt tasks to run in us.
295 * default: 0.95s
296 */
297int sysctl_sched_rt_runtime = 950000;
298
299
300
301/*
302 * __task_rq_lock - lock the rq @p resides on.
303 */
304static inline struct rq *__task_rq_lock(struct task_struct *p)
305	__acquires(rq->lock)
306{
307	struct rq *rq;
308
309	lockdep_assert_held(&p->pi_lock);
310
311	for (;;) {
312		rq = task_rq(p);
313		raw_spin_lock(&rq->lock);
314		if (likely(rq == task_rq(p)))
315			return rq;
316		raw_spin_unlock(&rq->lock);
317	}
318}
319
320/*
321 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
322 */
323static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
324	__acquires(p->pi_lock)
325	__acquires(rq->lock)
326{
327	struct rq *rq;
328
329	for (;;) {
330		raw_spin_lock_irqsave(&p->pi_lock, *flags);
331		rq = task_rq(p);
332		raw_spin_lock(&rq->lock);
333		if (likely(rq == task_rq(p)))
334			return rq;
335		raw_spin_unlock(&rq->lock);
336		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
337	}
338}
339
340static void __task_rq_unlock(struct rq *rq)
341	__releases(rq->lock)
342{
343	raw_spin_unlock(&rq->lock);
344}
345
346static inline void
347task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
348	__releases(rq->lock)
349	__releases(p->pi_lock)
350{
351	raw_spin_unlock(&rq->lock);
352	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
353}
354
355/*
356 * this_rq_lock - lock this runqueue and disable interrupts.
357 */
358static struct rq *this_rq_lock(void)
359	__acquires(rq->lock)
360{
361	struct rq *rq;
362
363	local_irq_disable();
364	rq = this_rq();
365	raw_spin_lock(&rq->lock);
366
367	return rq;
368}
369
370#ifdef CONFIG_SCHED_HRTICK
371/*
372 * Use HR-timers to deliver accurate preemption points.
373 */
374
375static void hrtick_clear(struct rq *rq)
376{
377	if (hrtimer_active(&rq->hrtick_timer))
378		hrtimer_cancel(&rq->hrtick_timer);
379}
380
381/*
382 * High-resolution timer tick.
383 * Runs from hardirq context with interrupts disabled.
384 */
385static enum hrtimer_restart hrtick(struct hrtimer *timer)
386{
387	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
388
389	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
390
391	raw_spin_lock(&rq->lock);
392	update_rq_clock(rq);
393	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
394	raw_spin_unlock(&rq->lock);
395
396	return HRTIMER_NORESTART;
397}
398
399#ifdef CONFIG_SMP
400
401static int __hrtick_restart(struct rq *rq)
402{
403	struct hrtimer *timer = &rq->hrtick_timer;
404	ktime_t time = hrtimer_get_softexpires(timer);
405
406	return __hrtimer_start_range_ns(timer, time, 0, HRTIMER_MODE_ABS_PINNED, 0);
407}
408
409/*
410 * called from hardirq (IPI) context
411 */
412static void __hrtick_start(void *arg)
413{
414	struct rq *rq = arg;
415
416	raw_spin_lock(&rq->lock);
417	__hrtick_restart(rq);
418	rq->hrtick_csd_pending = 0;
419	raw_spin_unlock(&rq->lock);
420}
421
422/*
423 * Called to set the hrtick timer state.
424 *
425 * called with rq->lock held and irqs disabled
426 */
427void hrtick_start(struct rq *rq, u64 delay)
428{
429	struct hrtimer *timer = &rq->hrtick_timer;
430	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
431
432	hrtimer_set_expires(timer, time);
433
434	if (rq == this_rq()) {
435		__hrtick_restart(rq);
436	} else if (!rq->hrtick_csd_pending) {
437		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
438		rq->hrtick_csd_pending = 1;
439	}
440}
441
442static int
443hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
444{
445	int cpu = (int)(long)hcpu;
446
447	switch (action) {
448	case CPU_UP_CANCELED:
449	case CPU_UP_CANCELED_FROZEN:
450	case CPU_DOWN_PREPARE:
451	case CPU_DOWN_PREPARE_FROZEN:
452	case CPU_DEAD:
453	case CPU_DEAD_FROZEN:
454		hrtick_clear(cpu_rq(cpu));
455		return NOTIFY_OK;
456	}
457
458	return NOTIFY_DONE;
459}
460
461static __init void init_hrtick(void)
462{
463	hotcpu_notifier(hotplug_hrtick, 0);
464}
465#else
466/*
467 * Called to set the hrtick timer state.
468 *
469 * called with rq->lock held and irqs disabled
470 */
471void hrtick_start(struct rq *rq, u64 delay)
472{
473	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
474			HRTIMER_MODE_REL_PINNED, 0);
475}
476
477static inline void init_hrtick(void)
478{
479}
480#endif /* CONFIG_SMP */
481
482static void init_rq_hrtick(struct rq *rq)
483{
484#ifdef CONFIG_SMP
485	rq->hrtick_csd_pending = 0;
486
487	rq->hrtick_csd.flags = 0;
488	rq->hrtick_csd.func = __hrtick_start;
489	rq->hrtick_csd.info = rq;
490#endif
491
492	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
493	rq->hrtick_timer.function = hrtick;
494}
495#else	/* CONFIG_SCHED_HRTICK */
496static inline void hrtick_clear(struct rq *rq)
497{
498}
499
500static inline void init_rq_hrtick(struct rq *rq)
501{
502}
503
504static inline void init_hrtick(void)
505{
506}
507#endif	/* CONFIG_SCHED_HRTICK */
508
509/*
510 * resched_task - mark a task 'to be rescheduled now'.
511 *
512 * On UP this means the setting of the need_resched flag, on SMP it
513 * might also involve a cross-CPU call to trigger the scheduler on
514 * the target CPU.
515 */
516#ifdef CONFIG_SMP
517void resched_task(struct task_struct *p)
518{
519	int cpu;
520
521	assert_raw_spin_locked(&task_rq(p)->lock);
522
523	if (test_tsk_need_resched(p))
524		return;
525
526	set_tsk_need_resched(p);
527
528	cpu = task_cpu(p);
529	if (cpu == smp_processor_id())
530		return;
531
532	/* NEED_RESCHED must be visible before we test polling */
533	smp_mb();
534	if (!tsk_is_polling(p))
535		smp_send_reschedule(cpu);
536}
537
538void resched_cpu(int cpu)
539{
540	struct rq *rq = cpu_rq(cpu);
541	unsigned long flags;
542
543	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
544		return;
545	resched_task(cpu_curr(cpu));
546	raw_spin_unlock_irqrestore(&rq->lock, flags);
547}
548
549#ifdef CONFIG_NO_HZ_COMMON
550/*
551 * In the semi idle case, use the nearest busy cpu for migrating timers
552 * from an idle cpu.  This is good for power-savings.
553 *
554 * We don't do similar optimization for completely idle system, as
555 * selecting an idle cpu will add more delays to the timers than intended
556 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
557 */
558int get_nohz_timer_target(void)
559{
560	int cpu = smp_processor_id();
561	int i;
562	struct sched_domain *sd;
563
564	rcu_read_lock();
565	for_each_domain(cpu, sd) {
566		for_each_cpu(i, sched_domain_span(sd)) {
567			if (!idle_cpu(i)) {
568				cpu = i;
569				goto unlock;
570			}
571		}
572	}
573unlock:
574	rcu_read_unlock();
575	return cpu;
576}
577/*
578 * When add_timer_on() enqueues a timer into the timer wheel of an
579 * idle CPU then this timer might expire before the next timer event
580 * which is scheduled to wake up that CPU. In case of a completely
581 * idle system the next event might even be infinite time into the
582 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
583 * leaves the inner idle loop so the newly added timer is taken into
584 * account when the CPU goes back to idle and evaluates the timer
585 * wheel for the next timer event.
586 */
587static void wake_up_idle_cpu(int cpu)
588{
589	struct rq *rq = cpu_rq(cpu);
590
591	if (cpu == smp_processor_id())
592		return;
593
594	/*
595	 * This is safe, as this function is called with the timer
596	 * wheel base lock of (cpu) held. When the CPU is on the way
597	 * to idle and has not yet set rq->curr to idle then it will
598	 * be serialized on the timer wheel base lock and take the new
599	 * timer into account automatically.
600	 */
601	if (rq->curr != rq->idle)
602		return;
603
604	/*
605	 * We can set TIF_RESCHED on the idle task of the other CPU
606	 * lockless. The worst case is that the other CPU runs the
607	 * idle task through an additional NOOP schedule()
608	 */
609	set_tsk_need_resched(rq->idle);
610
611	/* NEED_RESCHED must be visible before we test polling */
612	smp_mb();
613	if (!tsk_is_polling(rq->idle))
614		smp_send_reschedule(cpu);
615}
616
617static bool wake_up_full_nohz_cpu(int cpu)
618{
619	if (tick_nohz_full_cpu(cpu)) {
620		if (cpu != smp_processor_id() ||
621		    tick_nohz_tick_stopped())
622			smp_send_reschedule(cpu);
623		return true;
624	}
625
626	return false;
627}
628
629void wake_up_nohz_cpu(int cpu)
630{
631	if (!wake_up_full_nohz_cpu(cpu))
632		wake_up_idle_cpu(cpu);
633}
634
635static inline bool got_nohz_idle_kick(void)
636{
637	int cpu = smp_processor_id();
638
639	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
640		return false;
641
642	if (idle_cpu(cpu) && !need_resched())
643		return true;
644
645	/*
646	 * We can't run Idle Load Balance on this CPU for this time so we
647	 * cancel it and clear NOHZ_BALANCE_KICK
648	 */
649	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
650	return false;
651}
652
653#else /* CONFIG_NO_HZ_COMMON */
654
655static inline bool got_nohz_idle_kick(void)
656{
657	return false;
658}
659
660#endif /* CONFIG_NO_HZ_COMMON */
661
662#ifdef CONFIG_NO_HZ_FULL
663bool sched_can_stop_tick(void)
664{
665       struct rq *rq;
666
667       rq = this_rq();
668
669       /* Make sure rq->nr_running update is visible after the IPI */
670       smp_rmb();
671
672       /* More than one running task need preemption */
673       if (rq->nr_running > 1)
674               return false;
675
676       return true;
677}
678#endif /* CONFIG_NO_HZ_FULL */
679
680void sched_avg_update(struct rq *rq)
681{
682	s64 period = sched_avg_period();
683
684	while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
685		/*
686		 * Inline assembly required to prevent the compiler
687		 * optimising this loop into a divmod call.
688		 * See __iter_div_u64_rem() for another example of this.
689		 */
690		asm("" : "+rm" (rq->age_stamp));
691		rq->age_stamp += period;
692		rq->rt_avg /= 2;
693	}
694}
695
696#else /* !CONFIG_SMP */
697void resched_task(struct task_struct *p)
698{
699	assert_raw_spin_locked(&task_rq(p)->lock);
700	set_tsk_need_resched(p);
701}
702#endif /* CONFIG_SMP */
703
704#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
705			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
706/*
707 * Iterate task_group tree rooted at *from, calling @down when first entering a
708 * node and @up when leaving it for the final time.
709 *
710 * Caller must hold rcu_lock or sufficient equivalent.
711 */
712int walk_tg_tree_from(struct task_group *from,
713			     tg_visitor down, tg_visitor up, void *data)
714{
715	struct task_group *parent, *child;
716	int ret;
717
718	parent = from;
719
720down:
721	ret = (*down)(parent, data);
722	if (ret)
723		goto out;
724	list_for_each_entry_rcu(child, &parent->children, siblings) {
725		parent = child;
726		goto down;
727
728up:
729		continue;
730	}
731	ret = (*up)(parent, data);
732	if (ret || parent == from)
733		goto out;
734
735	child = parent;
736	parent = parent->parent;
737	if (parent)
738		goto up;
739out:
740	return ret;
741}
742
743int tg_nop(struct task_group *tg, void *data)
744{
745	return 0;
746}
747#endif
748
749static void set_load_weight(struct task_struct *p)
750{
751	int prio = p->static_prio - MAX_RT_PRIO;
752	struct load_weight *load = &p->se.load;
753
754	/*
755	 * SCHED_IDLE tasks get minimal weight:
756	 */
757	if (p->policy == SCHED_IDLE) {
758		load->weight = scale_load(WEIGHT_IDLEPRIO);
759		load->inv_weight = WMULT_IDLEPRIO;
760		return;
761	}
762
763	load->weight = scale_load(prio_to_weight[prio]);
764	load->inv_weight = prio_to_wmult[prio];
765}
766
767static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
768{
769	update_rq_clock(rq);
770	sched_info_queued(rq, p);
771	p->sched_class->enqueue_task(rq, p, flags);
772}
773
774static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
775{
776	update_rq_clock(rq);
777	sched_info_dequeued(rq, p);
778	p->sched_class->dequeue_task(rq, p, flags);
779}
780
781void activate_task(struct rq *rq, struct task_struct *p, int flags)
782{
783	if (task_contributes_to_load(p))
784		rq->nr_uninterruptible--;
785
786	enqueue_task(rq, p, flags);
787}
788
789void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
790{
791	if (task_contributes_to_load(p))
792		rq->nr_uninterruptible++;
793
794	dequeue_task(rq, p, flags);
795}
796
797static void update_rq_clock_task(struct rq *rq, s64 delta)
798{
799/*
800 * In theory, the compile should just see 0 here, and optimize out the call
801 * to sched_rt_avg_update. But I don't trust it...
802 */
803#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
804	s64 steal = 0, irq_delta = 0;
805#endif
806#ifdef CONFIG_IRQ_TIME_ACCOUNTING
807	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
808
809	/*
810	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
811	 * this case when a previous update_rq_clock() happened inside a
812	 * {soft,}irq region.
813	 *
814	 * When this happens, we stop ->clock_task and only update the
815	 * prev_irq_time stamp to account for the part that fit, so that a next
816	 * update will consume the rest. This ensures ->clock_task is
817	 * monotonic.
818	 *
819	 * It does however cause some slight miss-attribution of {soft,}irq
820	 * time, a more accurate solution would be to update the irq_time using
821	 * the current rq->clock timestamp, except that would require using
822	 * atomic ops.
823	 */
824	if (irq_delta > delta)
825		irq_delta = delta;
826
827	rq->prev_irq_time += irq_delta;
828	delta -= irq_delta;
829#endif
830#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
831	if (static_key_false((&paravirt_steal_rq_enabled))) {
832		u64 st;
833
834		steal = paravirt_steal_clock(cpu_of(rq));
835		steal -= rq->prev_steal_time_rq;
836
837		if (unlikely(steal > delta))
838			steal = delta;
839
840		st = steal_ticks(steal);
841		steal = st * TICK_NSEC;
842
843		rq->prev_steal_time_rq += steal;
844
845		delta -= steal;
846	}
847#endif
848
849	rq->clock_task += delta;
850
851#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
852	if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
853		sched_rt_avg_update(rq, irq_delta + steal);
854#endif
855}
856
857void sched_set_stop_task(int cpu, struct task_struct *stop)
858{
859	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
860	struct task_struct *old_stop = cpu_rq(cpu)->stop;
861
862	if (stop) {
863		/*
864		 * Make it appear like a SCHED_FIFO task, its something
865		 * userspace knows about and won't get confused about.
866		 *
867		 * Also, it will make PI more or less work without too
868		 * much confusion -- but then, stop work should not
869		 * rely on PI working anyway.
870		 */
871		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
872
873		stop->sched_class = &stop_sched_class;
874	}
875
876	cpu_rq(cpu)->stop = stop;
877
878	if (old_stop) {
879		/*
880		 * Reset it back to a normal scheduling class so that
881		 * it can die in pieces.
882		 */
883		old_stop->sched_class = &rt_sched_class;
884	}
885}
886
887/*
888 * __normal_prio - return the priority that is based on the static prio
889 */
890static inline int __normal_prio(struct task_struct *p)
891{
892	return p->static_prio;
893}
894
895/*
896 * Calculate the expected normal priority: i.e. priority
897 * without taking RT-inheritance into account. Might be
898 * boosted by interactivity modifiers. Changes upon fork,
899 * setprio syscalls, and whenever the interactivity
900 * estimator recalculates.
901 */
902static inline int normal_prio(struct task_struct *p)
903{
904	int prio;
905
906	if (task_has_rt_policy(p))
907		prio = MAX_RT_PRIO-1 - p->rt_priority;
908	else
909		prio = __normal_prio(p);
910	return prio;
911}
912
913/*
914 * Calculate the current priority, i.e. the priority
915 * taken into account by the scheduler. This value might
916 * be boosted by RT tasks, or might be boosted by
917 * interactivity modifiers. Will be RT if the task got
918 * RT-boosted. If not then it returns p->normal_prio.
919 */
920static int effective_prio(struct task_struct *p)
921{
922	p->normal_prio = normal_prio(p);
923	/*
924	 * If we are RT tasks or we were boosted to RT priority,
925	 * keep the priority unchanged. Otherwise, update priority
926	 * to the normal priority:
927	 */
928	if (!rt_prio(p->prio))
929		return p->normal_prio;
930	return p->prio;
931}
932
933/**
934 * task_curr - is this task currently executing on a CPU?
935 * @p: the task in question.
936 *
937 * Return: 1 if the task is currently executing. 0 otherwise.
938 */
939inline int task_curr(const struct task_struct *p)
940{
941	return cpu_curr(task_cpu(p)) == p;
942}
943
944static inline void check_class_changed(struct rq *rq, struct task_struct *p,
945				       const struct sched_class *prev_class,
946				       int oldprio)
947{
948	if (prev_class != p->sched_class) {
949		if (prev_class->switched_from)
950			prev_class->switched_from(rq, p);
951		p->sched_class->switched_to(rq, p);
952	} else if (oldprio != p->prio)
953		p->sched_class->prio_changed(rq, p, oldprio);
954}
955
956void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
957{
958	const struct sched_class *class;
959
960	if (p->sched_class == rq->curr->sched_class) {
961		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
962	} else {
963		for_each_class(class) {
964			if (class == rq->curr->sched_class)
965				break;
966			if (class == p->sched_class) {
967				resched_task(rq->curr);
968				break;
969			}
970		}
971	}
972
973	/*
974	 * A queue event has occurred, and we're going to schedule.  In
975	 * this case, we can save a useless back to back clock update.
976	 */
977	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
978		rq->skip_clock_update = 1;
979}
980
981#ifdef CONFIG_SMP
982void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
983{
984#ifdef CONFIG_SCHED_DEBUG
985	/*
986	 * We should never call set_task_cpu() on a blocked task,
987	 * ttwu() will sort out the placement.
988	 */
989	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
990			!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
991
992#ifdef CONFIG_LOCKDEP
993	/*
994	 * The caller should hold either p->pi_lock or rq->lock, when changing
995	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
996	 *
997	 * sched_move_task() holds both and thus holding either pins the cgroup,
998	 * see task_group().
999	 *
1000	 * Furthermore, all task_rq users should acquire both locks, see
1001	 * task_rq_lock().
1002	 */
1003	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
1004				      lockdep_is_held(&task_rq(p)->lock)));
1005#endif
1006#endif
1007
1008	trace_sched_migrate_task(p, new_cpu);
1009
1010	if (task_cpu(p) != new_cpu) {
1011		if (p->sched_class->migrate_task_rq)
1012			p->sched_class->migrate_task_rq(p, new_cpu);
1013		p->se.nr_migrations++;
1014		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
1015	}
1016
1017	__set_task_cpu(p, new_cpu);
1018}
1019
1020struct migration_arg {
1021	struct task_struct *task;
1022	int dest_cpu;
1023};
1024
1025static int migration_cpu_stop(void *data);
1026
1027/*
1028 * wait_task_inactive - wait for a thread to unschedule.
1029 *
1030 * If @match_state is nonzero, it's the @p->state value just checked and
1031 * not expected to change.  If it changes, i.e. @p might have woken up,
1032 * then return zero.  When we succeed in waiting for @p to be off its CPU,
1033 * we return a positive number (its total switch count).  If a second call
1034 * a short while later returns the same number, the caller can be sure that
1035 * @p has remained unscheduled the whole time.
1036 *
1037 * The caller must ensure that the task *will* unschedule sometime soon,
1038 * else this function might spin for a *long* time. This function can't
1039 * be called with interrupts off, or it may introduce deadlock with
1040 * smp_call_function() if an IPI is sent by the same process we are
1041 * waiting to become inactive.
1042 */
1043unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1044{
1045	unsigned long flags;
1046	int running, on_rq;
1047	unsigned long ncsw;
1048	struct rq *rq;
1049
1050	for (;;) {
1051		/*
1052		 * We do the initial early heuristics without holding
1053		 * any task-queue locks at all. We'll only try to get
1054		 * the runqueue lock when things look like they will
1055		 * work out!
1056		 */
1057		rq = task_rq(p);
1058
1059		/*
1060		 * If the task is actively running on another CPU
1061		 * still, just relax and busy-wait without holding
1062		 * any locks.
1063		 *
1064		 * NOTE! Since we don't hold any locks, it's not
1065		 * even sure that "rq" stays as the right runqueue!
1066		 * But we don't care, since "task_running()" will
1067		 * return false if the runqueue has changed and p
1068		 * is actually now running somewhere else!
1069		 */
1070		while (task_running(rq, p)) {
1071			if (match_state && unlikely(p->state != match_state))
1072				return 0;
1073			cpu_relax();
1074		}
1075
1076		/*
1077		 * Ok, time to look more closely! We need the rq
1078		 * lock now, to be *sure*. If we're wrong, we'll
1079		 * just go back and repeat.
1080		 */
1081		rq = task_rq_lock(p, &flags);
1082		trace_sched_wait_task(p);
1083		running = task_running(rq, p);
1084		on_rq = p->on_rq;
1085		ncsw = 0;
1086		if (!match_state || p->state == match_state)
1087			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
1088		task_rq_unlock(rq, p, &flags);
1089
1090		/*
1091		 * If it changed from the expected state, bail out now.
1092		 */
1093		if (unlikely(!ncsw))
1094			break;
1095
1096		/*
1097		 * Was it really running after all now that we
1098		 * checked with the proper locks actually held?
1099		 *
1100		 * Oops. Go back and try again..
1101		 */
1102		if (unlikely(running)) {
1103			cpu_relax();
1104			continue;
1105		}
1106
1107		/*
1108		 * It's not enough that it's not actively running,
1109		 * it must be off the runqueue _entirely_, and not
1110		 * preempted!
1111		 *
1112		 * So if it was still runnable (but just not actively
1113		 * running right now), it's preempted, and we should
1114		 * yield - it could be a while.
1115		 */
1116		if (unlikely(on_rq)) {
1117			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
1118
1119			set_current_state(TASK_UNINTERRUPTIBLE);
1120			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
1121			continue;
1122		}
1123
1124		/*
1125		 * Ahh, all good. It wasn't running, and it wasn't
1126		 * runnable, which means that it will never become
1127		 * running in the future either. We're all done!
1128		 */
1129		break;
1130	}
1131
1132	return ncsw;
1133}
1134
1135/***
1136 * kick_process - kick a running thread to enter/exit the kernel
1137 * @p: the to-be-kicked thread
1138 *
1139 * Cause a process which is running on another CPU to enter
1140 * kernel-mode, without any delay. (to get signals handled.)
1141 *
1142 * NOTE: this function doesn't have to take the runqueue lock,
1143 * because all it wants to ensure is that the remote task enters
1144 * the kernel. If the IPI races and the task has been migrated
1145 * to another CPU then no harm is done and the purpose has been
1146 * achieved as well.
1147 */
1148void kick_process(struct task_struct *p)
1149{
1150	int cpu;
1151
1152	preempt_disable();
1153	cpu = task_cpu(p);
1154	if ((cpu != smp_processor_id()) && task_curr(p))
1155		smp_send_reschedule(cpu);
1156	preempt_enable();
1157}
1158EXPORT_SYMBOL_GPL(kick_process);
1159#endif /* CONFIG_SMP */
1160
1161#ifdef CONFIG_SMP
1162/*
1163 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
1164 */
1165static int select_fallback_rq(int cpu, struct task_struct *p)
1166{
1167	int nid = cpu_to_node(cpu);
1168	const struct cpumask *nodemask = NULL;
1169	enum { cpuset, possible, fail } state = cpuset;
1170	int dest_cpu;
1171
1172	/*
1173	 * If the node that the cpu is on has been offlined, cpu_to_node()
1174	 * will return -1. There is no cpu on the node, and we should
1175	 * select the cpu on the other node.
1176	 */
1177	if (nid != -1) {
1178		nodemask = cpumask_of_node(nid);
1179
1180		/* Look for allowed, online CPU in same node. */
1181		for_each_cpu(dest_cpu, nodemask) {
1182			if (!cpu_online(dest_cpu))
1183				continue;
1184			if (!cpu_active(dest_cpu))
1185				continue;
1186			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
1187				return dest_cpu;
1188		}
1189	}
1190
1191	for (;;) {
1192		/* Any allowed, online CPU? */
1193		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
1194			if (!cpu_online(dest_cpu))
1195				continue;
1196			if (!cpu_active(dest_cpu))
1197				continue;
1198			goto out;
1199		}
1200
1201		switch (state) {
1202		case cpuset:
1203			/* No more Mr. Nice Guy. */
1204			cpuset_cpus_allowed_fallback(p);
1205			state = possible;
1206			break;
1207
1208		case possible:
1209			do_set_cpus_allowed(p, cpu_possible_mask);
1210			state = fail;
1211			break;
1212
1213		case fail:
1214			BUG();
1215			break;
1216		}
1217	}
1218
1219out:
1220	if (state != cpuset) {
1221		/*
1222		 * Don't tell them about moving exiting tasks or
1223		 * kernel threads (both mm NULL), since they never
1224		 * leave kernel.
1225		 */
1226		if (p->mm && printk_ratelimit()) {
1227			printk_sched("process %d (%s) no longer affine to cpu%d\n",
1228					task_pid_nr(p), p->comm, cpu);
1229		}
1230	}
1231
1232	return dest_cpu;
1233}
1234
1235/*
1236 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
1237 */
1238static inline
1239int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
1240{
1241	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
1242
1243	/*
1244	 * In order not to call set_task_cpu() on a blocking task we need
1245	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
1246	 * cpu.
1247	 *
1248	 * Since this is common to all placement strategies, this lives here.
1249	 *
1250	 * [ this allows ->select_task() to simply return task_cpu(p) and
1251	 *   not worry about this generic constraint ]
1252	 */
1253	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
1254		     !cpu_online(cpu)))
1255		cpu = select_fallback_rq(task_cpu(p), p);
1256
1257	return cpu;
1258}
1259
1260static void update_avg(u64 *avg, u64 sample)
1261{
1262	s64 diff = sample - *avg;
1263	*avg += diff >> 3;
1264}
1265#endif
1266
1267static void
1268ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
1269{
1270#ifdef CONFIG_SCHEDSTATS
1271	struct rq *rq = this_rq();
1272
1273#ifdef CONFIG_SMP
1274	int this_cpu = smp_processor_id();
1275
1276	if (cpu == this_cpu) {
1277		schedstat_inc(rq, ttwu_local);
1278		schedstat_inc(p, se.statistics.nr_wakeups_local);
1279	} else {
1280		struct sched_domain *sd;
1281
1282		schedstat_inc(p, se.statistics.nr_wakeups_remote);
1283		rcu_read_lock();
1284		for_each_domain(this_cpu, sd) {
1285			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
1286				schedstat_inc(sd, ttwu_wake_remote);
1287				break;
1288			}
1289		}
1290		rcu_read_unlock();
1291	}
1292
1293	if (wake_flags & WF_MIGRATED)
1294		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
1295
1296#endif /* CONFIG_SMP */
1297
1298	schedstat_inc(rq, ttwu_count);
1299	schedstat_inc(p, se.statistics.nr_wakeups);
1300
1301	if (wake_flags & WF_SYNC)
1302		schedstat_inc(p, se.statistics.nr_wakeups_sync);
1303
1304#endif /* CONFIG_SCHEDSTATS */
1305}
1306
1307static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
1308{
1309	activate_task(rq, p, en_flags);
1310	p->on_rq = 1;
1311
1312	/* if a worker is waking up, notify workqueue */
1313	if (p->flags & PF_WQ_WORKER)
1314		wq_worker_waking_up(p, cpu_of(rq));
1315}
1316
1317/*
1318 * Mark the task runnable and perform wakeup-preemption.
1319 */
1320static void
1321ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
1322{
1323	check_preempt_curr(rq, p, wake_flags);
1324	trace_sched_wakeup(p, true);
1325
1326	p->state = TASK_RUNNING;
1327#ifdef CONFIG_SMP
1328	if (p->sched_class->task_woken)
1329		p->sched_class->task_woken(rq, p);
1330
1331	if (rq->idle_stamp) {
1332		u64 delta = rq_clock(rq) - rq->idle_stamp;
1333		u64 max = 2*rq->max_idle_balance_cost;
1334
1335		update_avg(&rq->avg_idle, delta);
1336
1337		if (rq->avg_idle > max)
1338			rq->avg_idle = max;
1339
1340		rq->idle_stamp = 0;
1341	}
1342#endif
1343}
1344
1345static void
1346ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
1347{
1348#ifdef CONFIG_SMP
1349	if (p->sched_contributes_to_load)
1350		rq->nr_uninterruptible--;
1351#endif
1352
1353	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
1354	ttwu_do_wakeup(rq, p, wake_flags);
1355}
1356
1357/*
1358 * Called in case the task @p isn't fully descheduled from its runqueue,
1359 * in this case we must do a remote wakeup. Its a 'light' wakeup though,
1360 * since all we need to do is flip p->state to TASK_RUNNING, since
1361 * the task is still ->on_rq.
1362 */
1363static int ttwu_remote(struct task_struct *p, int wake_flags)
1364{
1365	struct rq *rq;
1366	int ret = 0;
1367
1368	rq = __task_rq_lock(p);
1369	if (p->on_rq) {
1370		/* check_preempt_curr() may use rq clock */
1371		update_rq_clock(rq);
1372		ttwu_do_wakeup(rq, p, wake_flags);
1373		ret = 1;
1374	}
1375	__task_rq_unlock(rq);
1376
1377	return ret;
1378}
1379
1380#ifdef CONFIG_SMP
1381static void sched_ttwu_pending(void)
1382{
1383	struct rq *rq = this_rq();
1384	struct llist_node *llist = llist_del_all(&rq->wake_list);
1385	struct task_struct *p;
1386
1387	raw_spin_lock(&rq->lock);
1388
1389	while (llist) {
1390		p = llist_entry(llist, struct task_struct, wake_entry);
1391		llist = llist_next(llist);
1392		ttwu_do_activate(rq, p, 0);
1393	}
1394
1395	raw_spin_unlock(&rq->lock);
1396}
1397
1398void scheduler_ipi(void)
1399{
1400	if (llist_empty(&this_rq()->wake_list)
1401			&& !tick_nohz_full_cpu(smp_processor_id())
1402			&& !got_nohz_idle_kick())
1403		return;
1404
1405	/*
1406	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
1407	 * traditionally all their work was done from the interrupt return
1408	 * path. Now that we actually do some work, we need to make sure
1409	 * we do call them.
1410	 *
1411	 * Some archs already do call them, luckily irq_enter/exit nest
1412	 * properly.
1413	 *
1414	 * Arguably we should visit all archs and update all handlers,
1415	 * however a fair share of IPIs are still resched only so this would
1416	 * somewhat pessimize the simple resched case.
1417	 */
1418	irq_enter();
1419	tick_nohz_full_check();
1420	sched_ttwu_pending();
1421
1422	/*
1423	 * Check if someone kicked us for doing the nohz idle load balance.
1424	 */
1425	if (unlikely(got_nohz_idle_kick())) {
1426		this_rq()->idle_balance = 1;
1427		raise_softirq_irqoff(SCHED_SOFTIRQ);
1428	}
1429	irq_exit();
1430}
1431
1432static void ttwu_queue_remote(struct task_struct *p, int cpu)
1433{
1434	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
1435		smp_send_reschedule(cpu);
1436}
1437
1438bool cpus_share_cache(int this_cpu, int that_cpu)
1439{
1440	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
1441}
1442#endif /* CONFIG_SMP */
1443
1444static void ttwu_queue(struct task_struct *p, int cpu)
1445{
1446	struct rq *rq = cpu_rq(cpu);
1447
1448#if defined(CONFIG_SMP)
1449	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
1450		sched_clock_cpu(cpu); /* sync clocks x-cpu */
1451		ttwu_queue_remote(p, cpu);
1452		return;
1453	}
1454#endif
1455
1456	raw_spin_lock(&rq->lock);
1457	ttwu_do_activate(rq, p, 0);
1458	raw_spin_unlock(&rq->lock);
1459}
1460
1461/**
1462 * try_to_wake_up - wake up a thread
1463 * @p: the thread to be awakened
1464 * @state: the mask of task states that can be woken
1465 * @wake_flags: wake modifier flags (WF_*)
1466 *
1467 * Put it on the run-queue if it's not already there. The "current"
1468 * thread is always on the run-queue (except when the actual
1469 * re-schedule is in progress), and as such you're allowed to do
1470 * the simpler "current->state = TASK_RUNNING" to mark yourself
1471 * runnable without the overhead of this.
1472 *
1473 * Return: %true if @p was woken up, %false if it was already running.
1474 * or @state didn't match @p's state.
1475 */
1476static int
1477try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1478{
1479	unsigned long flags;
1480	int cpu, success = 0;
1481
1482	/*
1483	 * If we are going to wake up a thread waiting for CONDITION we
1484	 * need to ensure that CONDITION=1 done by the caller can not be
1485	 * reordered with p->state check below. This pairs with mb() in
1486	 * set_current_state() the waiting thread does.
1487	 */
1488	smp_mb__before_spinlock();
1489	raw_spin_lock_irqsave(&p->pi_lock, flags);
1490	if (!(p->state & state))
1491		goto out;
1492
1493	success = 1; /* we're going to change ->state */
1494	cpu = task_cpu(p);
1495
1496	if (p->on_rq && ttwu_remote(p, wake_flags))
1497		goto stat;
1498
1499#ifdef CONFIG_SMP
1500	/*
1501	 * If the owning (remote) cpu is still in the middle of schedule() with
1502	 * this task as prev, wait until its done referencing the task.
1503	 */
1504	while (p->on_cpu)
1505		cpu_relax();
1506	/*
1507	 * Pairs with the smp_wmb() in finish_lock_switch().
1508	 */
1509	smp_rmb();
1510
1511	p->sched_contributes_to_load = !!task_contributes_to_load(p);
1512	p->state = TASK_WAKING;
1513
1514	if (p->sched_class->task_waking)
1515		p->sched_class->task_waking(p);
1516
1517	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
1518	if (task_cpu(p) != cpu) {
1519		wake_flags |= WF_MIGRATED;
1520		set_task_cpu(p, cpu);
1521	}
1522#endif /* CONFIG_SMP */
1523
1524	ttwu_queue(p, cpu);
1525stat:
1526	ttwu_stat(p, cpu, wake_flags);
1527out:
1528	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1529
1530	return success;
1531}
1532
1533/**
1534 * try_to_wake_up_local - try to wake up a local task with rq lock held
1535 * @p: the thread to be awakened
1536 *
1537 * Put @p on the run-queue if it's not already there. The caller must
1538 * ensure that this_rq() is locked, @p is bound to this_rq() and not
1539 * the current task.
1540 */
1541static void try_to_wake_up_local(struct task_struct *p)
1542{
1543	struct rq *rq = task_rq(p);
1544
1545	if (WARN_ON_ONCE(rq != this_rq()) ||
1546	    WARN_ON_ONCE(p == current))
1547		return;
1548
1549	lockdep_assert_held(&rq->lock);
1550
1551	if (!raw_spin_trylock(&p->pi_lock)) {
1552		raw_spin_unlock(&rq->lock);
1553		raw_spin_lock(&p->pi_lock);
1554		raw_spin_lock(&rq->lock);
1555	}
1556
1557	if (!(p->state & TASK_NORMAL))
1558		goto out;
1559
1560	if (!p->on_rq)
1561		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
1562
1563	ttwu_do_wakeup(rq, p, 0);
1564	ttwu_stat(p, smp_processor_id(), 0);
1565out:
1566	raw_spin_unlock(&p->pi_lock);
1567}
1568
1569/**
1570 * wake_up_process - Wake up a specific process
1571 * @p: The process to be woken up.
1572 *
1573 * Attempt to wake up the nominated process and move it to the set of runnable
1574 * processes.
1575 *
1576 * Return: 1 if the process was woken up, 0 if it was already running.
1577 *
1578 * It may be assumed that this function implies a write memory barrier before
1579 * changing the task state if and only if any tasks are woken up.
1580 */
1581int wake_up_process(struct task_struct *p)
1582{
1583	WARN_ON(task_is_stopped_or_traced(p));
1584	return try_to_wake_up(p, TASK_NORMAL, 0);
1585}
1586EXPORT_SYMBOL(wake_up_process);
1587
1588int wake_up_state(struct task_struct *p, unsigned int state)
1589{
1590	return try_to_wake_up(p, state, 0);
1591}
1592
1593/*
1594 * Perform scheduler related setup for a newly forked process p.
1595 * p is forked by current.
1596 *
1597 * __sched_fork() is basic setup used by init_idle() too:
1598 */
1599static void __sched_fork(struct task_struct *p)
1600{
1601	p->on_rq			= 0;
1602
1603	p->se.on_rq			= 0;
1604	p->se.exec_start		= 0;
1605	p->se.sum_exec_runtime		= 0;
1606	p->se.prev_sum_exec_runtime	= 0;
1607	p->se.nr_migrations		= 0;
1608	p->se.vruntime			= 0;
1609	INIT_LIST_HEAD(&p->se.group_node);
1610
1611#ifdef CONFIG_SCHEDSTATS
1612	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
1613#endif
1614
1615	INIT_LIST_HEAD(&p->rt.run_list);
1616
1617#ifdef CONFIG_PREEMPT_NOTIFIERS
1618	INIT_HLIST_HEAD(&p->preempt_notifiers);
1619#endif
1620
1621#ifdef CONFIG_NUMA_BALANCING
1622	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
1623		p->mm->numa_next_scan = jiffies;
1624		p->mm->numa_next_reset = jiffies;
1625		p->mm->numa_scan_seq = 0;
1626	}
1627
1628	p->node_stamp = 0ULL;
1629	p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
1630	p->numa_migrate_seq = p->mm ? p->mm->numa_scan_seq - 1 : 0;
1631	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
1632	p->numa_work.next = &p->numa_work;
1633#endif /* CONFIG_NUMA_BALANCING */
1634}
1635
1636#ifdef CONFIG_NUMA_BALANCING
1637#ifdef CONFIG_SCHED_DEBUG
1638void set_numabalancing_state(bool enabled)
1639{
1640	if (enabled)
1641		sched_feat_set("NUMA");
1642	else
1643		sched_feat_set("NO_NUMA");
1644}
1645#else
1646__read_mostly bool numabalancing_enabled;
1647
1648void set_numabalancing_state(bool enabled)
1649{
1650	numabalancing_enabled = enabled;
1651}
1652#endif /* CONFIG_SCHED_DEBUG */
1653#endif /* CONFIG_NUMA_BALANCING */
1654
1655/*
1656 * fork()/clone()-time setup:
1657 */
1658void sched_fork(struct task_struct *p)
1659{
1660	unsigned long flags;
1661	int cpu = get_cpu();
1662
1663	__sched_fork(p);
1664	/*
1665	 * We mark the process as running here. This guarantees that
1666	 * nobody will actually run it, and a signal or other external
1667	 * event cannot wake it up and insert it on the runqueue either.
1668	 */
1669	p->state = TASK_RUNNING;
1670
1671	/*
1672	 * Make sure we do not leak PI boosting priority to the child.
1673	 */
1674	p->prio = current->normal_prio;
1675
1676	/*
1677	 * Revert to default priority/policy on fork if requested.
1678	 */
1679	if (unlikely(p->sched_reset_on_fork)) {
1680		if (task_has_rt_policy(p)) {
1681			p->policy = SCHED_NORMAL;
1682			p->static_prio = NICE_TO_PRIO(0);
1683			p->rt_priority = 0;
1684		} else if (PRIO_TO_NICE(p->static_prio) < 0)
1685			p->static_prio = NICE_TO_PRIO(0);
1686
1687		p->prio = p->normal_prio = __normal_prio(p);
1688		set_load_weight(p);
1689
1690		/*
1691		 * We don't need the reset flag anymore after the fork. It has
1692		 * fulfilled its duty:
1693		 */
1694		p->sched_reset_on_fork = 0;
1695	}
1696
1697	if (!rt_prio(p->prio))
1698		p->sched_class = &fair_sched_class;
1699
1700	if (p->sched_class->task_fork)
1701		p->sched_class->task_fork(p);
1702
1703	/*
1704	 * The child is not yet in the pid-hash so no cgroup attach races,
1705	 * and the cgroup is pinned to this child due to cgroup_fork()
1706	 * is ran before sched_fork().
1707	 *
1708	 * Silence PROVE_RCU.
1709	 */
1710	raw_spin_lock_irqsave(&p->pi_lock, flags);
1711	set_task_cpu(p, cpu);
1712	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1713
1714#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1715	if (likely(sched_info_on()))
1716		memset(&p->sched_info, 0, sizeof(p->sched_info));
1717#endif
1718#if defined(CONFIG_SMP)
1719	p->on_cpu = 0;
1720#endif
1721#ifdef CONFIG_PREEMPT_COUNT
1722	/* Want to start with kernel preemption disabled. */
1723	task_thread_info(p)->preempt_count = 1;
1724#endif
1725#ifdef CONFIG_SMP
1726	plist_node_init(&p->pushable_tasks, MAX_PRIO);
1727#endif
1728
1729	put_cpu();
1730}
1731
1732/*
1733 * wake_up_new_task - wake up a newly created task for the first time.
1734 *
1735 * This function will do some initial scheduler statistics housekeeping
1736 * that must be done for every newly created context, then puts the task
1737 * on the runqueue and wakes it.
1738 */
1739void wake_up_new_task(struct task_struct *p)
1740{
1741	unsigned long flags;
1742	struct rq *rq;
1743
1744	raw_spin_lock_irqsave(&p->pi_lock, flags);
1745#ifdef CONFIG_SMP
1746	/*
1747	 * Fork balancing, do it here and not earlier because:
1748	 *  - cpus_allowed can change in the fork path
1749	 *  - any previously selected cpu might disappear through hotplug
1750	 */
1751	set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
1752#endif
1753
1754	/* Initialize new task's runnable average */
1755	init_task_runnable_average(p);
1756	rq = __task_rq_lock(p);
1757	activate_task(rq, p, 0);
1758	p->on_rq = 1;
1759	trace_sched_wakeup_new(p, true);
1760	check_preempt_curr(rq, p, WF_FORK);
1761#ifdef CONFIG_SMP
1762	if (p->sched_class->task_woken)
1763		p->sched_class->task_woken(rq, p);
1764#endif
1765	task_rq_unlock(rq, p, &flags);
1766}
1767
1768#ifdef CONFIG_PREEMPT_NOTIFIERS
1769
1770/**
1771 * preempt_notifier_register - tell me when current is being preempted & rescheduled
1772 * @notifier: notifier struct to register
1773 */
1774void preempt_notifier_register(struct preempt_notifier *notifier)
1775{
1776	hlist_add_head(&notifier->link, &current->preempt_notifiers);
1777}
1778EXPORT_SYMBOL_GPL(preempt_notifier_register);
1779
1780/**
1781 * preempt_notifier_unregister - no longer interested in preemption notifications
1782 * @notifier: notifier struct to unregister
1783 *
1784 * This is safe to call from within a preemption notifier.
1785 */
1786void preempt_notifier_unregister(struct preempt_notifier *notifier)
1787{
1788	hlist_del(&notifier->link);
1789}
1790EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
1791
1792static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1793{
1794	struct preempt_notifier *notifier;
1795
1796	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
1797		notifier->ops->sched_in(notifier, raw_smp_processor_id());
1798}
1799
1800static void
1801fire_sched_out_preempt_notifiers(struct task_struct *curr,
1802				 struct task_struct *next)
1803{
1804	struct preempt_notifier *notifier;
1805
1806	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
1807		notifier->ops->sched_out(notifier, next);
1808}
1809
1810#else /* !CONFIG_PREEMPT_NOTIFIERS */
1811
1812static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1813{
1814}
1815
1816static void
1817fire_sched_out_preempt_notifiers(struct task_struct *curr,
1818				 struct task_struct *next)
1819{
1820}
1821
1822#endif /* CONFIG_PREEMPT_NOTIFIERS */
1823
1824/**
1825 * prepare_task_switch - prepare to switch tasks
1826 * @rq: the runqueue preparing to switch
1827 * @prev: the current task that is being switched out
1828 * @next: the task we are going to switch to.
1829 *
1830 * This is called with the rq lock held and interrupts off. It must
1831 * be paired with a subsequent finish_task_switch after the context
1832 * switch.
1833 *
1834 * prepare_task_switch sets up locking and calls architecture specific
1835 * hooks.
1836 */
1837static inline void
1838prepare_task_switch(struct rq *rq, struct task_struct *prev,
1839		    struct task_struct *next)
1840{
1841	trace_sched_switch(prev, next);
1842	sched_info_switch(rq, prev, next);
1843	perf_event_task_sched_out(prev, next);
1844	fire_sched_out_preempt_notifiers(prev, next);
1845	prepare_lock_switch(rq, next);
1846	prepare_arch_switch(next);
1847}
1848
1849/**
1850 * finish_task_switch - clean up after a task-switch
1851 * @rq: runqueue associated with task-switch
1852 * @prev: the thread we just switched away from.
1853 *
1854 * finish_task_switch must be called after the context switch, paired
1855 * with a prepare_task_switch call before the context switch.
1856 * finish_task_switch will reconcile locking set up by prepare_task_switch,
1857 * and do any other architecture-specific cleanup actions.
1858 *
1859 * Note that we may have delayed dropping an mm in context_switch(). If
1860 * so, we finish that here outside of the runqueue lock. (Doing it
1861 * with the lock held can cause deadlocks; see schedule() for
1862 * details.)
1863 */
1864static void finish_task_switch(struct rq *rq, struct task_struct *prev)
1865	__releases(rq->lock)
1866{
1867	struct mm_struct *mm = rq->prev_mm;
1868	long prev_state;
1869
1870	rq->prev_mm = NULL;
1871
1872	/*
1873	 * A task struct has one reference for the use as "current".
1874	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
1875	 * schedule one last time. The schedule call will never return, and
1876	 * the scheduled task must drop that reference.
1877	 * The test for TASK_DEAD must occur while the runqueue locks are
1878	 * still held, otherwise prev could be scheduled on another cpu, die
1879	 * there before we look at prev->state, and then the reference would
1880	 * be dropped twice.
1881	 *		Manfred Spraul <manfred@colorfullife.com>
1882	 */
1883	prev_state = prev->state;
1884	vtime_task_switch(prev);
1885	finish_arch_switch(prev);
1886	perf_event_task_sched_in(prev, current);
1887	finish_lock_switch(rq, prev);
1888	finish_arch_post_lock_switch();
1889
1890	fire_sched_in_preempt_notifiers(current);
1891	if (mm)
1892		mmdrop(mm);
1893	if (unlikely(prev_state == TASK_DEAD)) {
1894		/*
1895		 * Remove function-return probe instances associated with this
1896		 * task and put them back on the free list.
1897		 */
1898		kprobe_flush_task(prev);
1899		put_task_struct(prev);
1900	}
1901
1902	tick_nohz_task_switch(current);
1903}
1904
1905#ifdef CONFIG_SMP
1906
1907/* assumes rq->lock is held */
1908static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
1909{
1910	if (prev->sched_class->pre_schedule)
1911		prev->sched_class->pre_schedule(rq, prev);
1912}
1913
1914/* rq->lock is NOT held, but preemption is disabled */
1915static inline void post_schedule(struct rq *rq)
1916{
1917	if (rq->post_schedule) {
1918		unsigned long flags;
1919
1920		raw_spin_lock_irqsave(&rq->lock, flags);
1921		if (rq->curr->sched_class->post_schedule)
1922			rq->curr->sched_class->post_schedule(rq);
1923		raw_spin_unlock_irqrestore(&rq->lock, flags);
1924
1925		rq->post_schedule = 0;
1926	}
1927}
1928
1929#else
1930
1931static inline void pre_schedule(struct rq *rq, struct task_struct *p)
1932{
1933}
1934
1935static inline void post_schedule(struct rq *rq)
1936{
1937}
1938
1939#endif
1940
1941/**
1942 * schedule_tail - first thing a freshly forked thread must call.
1943 * @prev: the thread we just switched away from.
1944 */
1945asmlinkage void schedule_tail(struct task_struct *prev)
1946	__releases(rq->lock)
1947{
1948	struct rq *rq = this_rq();
1949
1950	finish_task_switch(rq, prev);
1951
1952	/*
1953	 * FIXME: do we need to worry about rq being invalidated by the
1954	 * task_switch?
1955	 */
1956	post_schedule(rq);
1957
1958#ifdef __ARCH_WANT_UNLOCKED_CTXSW
1959	/* In this case, finish_task_switch does not reenable preemption */
1960	preempt_enable();
1961#endif
1962	if (current->set_child_tid)
1963		put_user(task_pid_vnr(current), current->set_child_tid);
1964}
1965
1966/*
1967 * context_switch - switch to the new MM and the new
1968 * thread's register state.
1969 */
1970static inline void
1971context_switch(struct rq *rq, struct task_struct *prev,
1972	       struct task_struct *next)
1973{
1974	struct mm_struct *mm, *oldmm;
1975
1976	prepare_task_switch(rq, prev, next);
1977
1978	mm = next->mm;
1979	oldmm = prev->active_mm;
1980	/*
1981	 * For paravirt, this is coupled with an exit in switch_to to
1982	 * combine the page table reload and the switch backend into
1983	 * one hypercall.
1984	 */
1985	arch_start_context_switch(prev);
1986
1987	if (!mm) {
1988		next->active_mm = oldmm;
1989		atomic_inc(&oldmm->mm_count);
1990		enter_lazy_tlb(oldmm, next);
1991	} else
1992		switch_mm(oldmm, mm, next);
1993
1994	if (!prev->mm) {
1995		prev->active_mm = NULL;
1996		rq->prev_mm = oldmm;
1997	}
1998	/*
1999	 * Since the runqueue lock will be released by the next
2000	 * task (which is an invalid locking op but in the case
2001	 * of the scheduler it's an obvious special-case), so we
2002	 * do an early lockdep release here:
2003	 */
2004#ifndef __ARCH_WANT_UNLOCKED_CTXSW
2005	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2006#endif
2007
2008	context_tracking_task_switch(prev, next);
2009	/* Here we just switch the register state and the stack. */
2010	switch_to(prev, next, prev);
2011
2012	barrier();
2013	/*
2014	 * this_rq must be evaluated again because prev may have moved
2015	 * CPUs since it called schedule(), thus the 'rq' on its stack
2016	 * frame will be invalid.
2017	 */
2018	finish_task_switch(this_rq(), prev);
2019}
2020
2021/*
2022 * nr_running and nr_context_switches:
2023 *
2024 * externally visible scheduler statistics: current number of runnable
2025 * threads, total number of context switches performed since bootup.
2026 */
2027unsigned long nr_running(void)
2028{
2029	unsigned long i, sum = 0;
2030
2031	for_each_online_cpu(i)
2032		sum += cpu_rq(i)->nr_running;
2033
2034	return sum;
2035}
2036
2037unsigned long long nr_context_switches(void)
2038{
2039	int i;
2040	unsigned long long sum = 0;
2041
2042	for_each_possible_cpu(i)
2043		sum += cpu_rq(i)->nr_switches;
2044
2045	return sum;
2046}
2047
2048unsigned long nr_iowait(void)
2049{
2050	unsigned long i, sum = 0;
2051
2052	for_each_possible_cpu(i)
2053		sum += atomic_read(&cpu_rq(i)->nr_iowait);
2054
2055	return sum;
2056}
2057
2058unsigned long nr_iowait_cpu(int cpu)
2059{
2060	struct rq *this = cpu_rq(cpu);
2061	return atomic_read(&this->nr_iowait);
2062}
2063
2064#ifdef CONFIG_SMP
2065
2066/*
2067 * sched_exec - execve() is a valuable balancing opportunity, because at
2068 * this point the task has the smallest effective memory and cache footprint.
2069 */
2070void sched_exec(void)
2071{
2072	struct task_struct *p = current;
2073	unsigned long flags;
2074	int dest_cpu;
2075
2076	raw_spin_lock_irqsave(&p->pi_lock, flags);
2077	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
2078	if (dest_cpu == smp_processor_id())
2079		goto unlock;
2080
2081	if (likely(cpu_active(dest_cpu))) {
2082		struct migration_arg arg = { p, dest_cpu };
2083
2084		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2085		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
2086		return;
2087	}
2088unlock:
2089	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2090}
2091
2092#endif
2093
2094DEFINE_PER_CPU(struct kernel_stat, kstat);
2095DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
2096
2097EXPORT_PER_CPU_SYMBOL(kstat);
2098EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
2099
2100/*
2101 * Return any ns on the sched_clock that have not yet been accounted in
2102 * @p in case that task is currently running.
2103 *
2104 * Called with task_rq_lock() held on @rq.
2105 */
2106static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
2107{
2108	u64 ns = 0;
2109
2110	if (task_current(rq, p)) {
2111		update_rq_clock(rq);
2112		ns = rq_clock_task(rq) - p->se.exec_start;
2113		if ((s64)ns < 0)
2114			ns = 0;
2115	}
2116
2117	return ns;
2118}
2119
2120unsigned long long task_delta_exec(struct task_struct *p)
2121{
2122	unsigned long flags;
2123	struct rq *rq;
2124	u64 ns = 0;
2125
2126	rq = task_rq_lock(p, &flags);
2127	ns = do_task_delta_exec(p, rq);
2128	task_rq_unlock(rq, p, &flags);
2129
2130	return ns;
2131}
2132
2133/*
2134 * Return accounted runtime for the task.
2135 * In case the task is currently running, return the runtime plus current's
2136 * pending runtime that have not been accounted yet.
2137 */
2138unsigned long long task_sched_runtime(struct task_struct *p)
2139{
2140	unsigned long flags;
2141	struct rq *rq;
2142	u64 ns = 0;
2143
2144	rq = task_rq_lock(p, &flags);
2145	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
2146	task_rq_unlock(rq, p, &flags);
2147
2148	return ns;
2149}
2150
2151/*
2152 * This function gets called by the timer code, with HZ frequency.
2153 * We call it with interrupts disabled.
2154 */
2155void scheduler_tick(void)
2156{
2157	int cpu = smp_processor_id();
2158	struct rq *rq = cpu_rq(cpu);
2159	struct task_struct *curr = rq->curr;
2160
2161	sched_clock_tick();
2162
2163	raw_spin_lock(&rq->lock);
2164	update_rq_clock(rq);
2165	curr->sched_class->task_tick(rq, curr, 0);
2166	update_cpu_load_active(rq);
2167	raw_spin_unlock(&rq->lock);
2168
2169	perf_event_task_tick();
2170
2171#ifdef CONFIG_SMP
2172	rq->idle_balance = idle_cpu(cpu);
2173	trigger_load_balance(rq, cpu);
2174#endif
2175	rq_last_tick_reset(rq);
2176}
2177
2178#ifdef CONFIG_NO_HZ_FULL
2179/**
2180 * scheduler_tick_max_deferment
2181 *
2182 * Keep at least one tick per second when a single
2183 * active task is running because the scheduler doesn't
2184 * yet completely support full dynticks environment.
2185 *
2186 * This makes sure that uptime, CFS vruntime, load
2187 * balancing, etc... continue to move forward, even
2188 * with a very low granularity.
2189 *
2190 * Return: Maximum deferment in nanoseconds.
2191 */
2192u64 scheduler_tick_max_deferment(void)
2193{
2194	struct rq *rq = this_rq();
2195	unsigned long next, now = ACCESS_ONCE(jiffies);
2196
2197	next = rq->last_sched_tick + HZ;
2198
2199	if (time_before_eq(next, now))
2200		return 0;
2201
2202	return jiffies_to_usecs(next - now) * NSEC_PER_USEC;
2203}
2204#endif
2205
2206notrace unsigned long get_parent_ip(unsigned long addr)
2207{
2208	if (in_lock_functions(addr)) {
2209		addr = CALLER_ADDR2;
2210		if (in_lock_functions(addr))
2211			addr = CALLER_ADDR3;
2212	}
2213	return addr;
2214}
2215
2216#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2217				defined(CONFIG_PREEMPT_TRACER))
2218
2219void __kprobes add_preempt_count(int val)
2220{
2221#ifdef CONFIG_DEBUG_PREEMPT
2222	/*
2223	 * Underflow?
2224	 */
2225	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
2226		return;
2227#endif
2228	preempt_count() += val;
2229#ifdef CONFIG_DEBUG_PREEMPT
2230	/*
2231	 * Spinlock count overflowing soon?
2232	 */
2233	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
2234				PREEMPT_MASK - 10);
2235#endif
2236	if (preempt_count() == val)
2237		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
2238}
2239EXPORT_SYMBOL(add_preempt_count);
2240
2241void __kprobes sub_preempt_count(int val)
2242{
2243#ifdef CONFIG_DEBUG_PREEMPT
2244	/*
2245	 * Underflow?
2246	 */
2247	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
2248		return;
2249	/*
2250	 * Is the spinlock portion underflowing?
2251	 */
2252	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
2253			!(preempt_count() & PREEMPT_MASK)))
2254		return;
2255#endif
2256
2257	if (preempt_count() == val)
2258		trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
2259	preempt_count() -= val;
2260}
2261EXPORT_SYMBOL(sub_preempt_count);
2262
2263#endif
2264
2265/*
2266 * Print scheduling while atomic bug:
2267 */
2268static noinline void __schedule_bug(struct task_struct *prev)
2269{
2270	if (oops_in_progress)
2271		return;
2272
2273	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
2274		prev->comm, prev->pid, preempt_count());
2275
2276	debug_show_held_locks(prev);
2277	print_modules();
2278	if (irqs_disabled())
2279		print_irqtrace_events(prev);
2280	dump_stack();
2281	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
2282}
2283
2284/*
2285 * Various schedule()-time debugging checks and statistics:
2286 */
2287static inline void schedule_debug(struct task_struct *prev)
2288{
2289	/*
2290	 * Test if we are atomic. Since do_exit() needs to call into
2291	 * schedule() atomically, we ignore that path for now.
2292	 * Otherwise, whine if we are scheduling when we should not be.
2293	 */
2294	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
2295		__schedule_bug(prev);
2296	rcu_sleep_check();
2297
2298	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
2299
2300	schedstat_inc(this_rq(), sched_count);
2301}
2302
2303static void put_prev_task(struct rq *rq, struct task_struct *prev)
2304{
2305	if (prev->on_rq || rq->skip_clock_update < 0)
2306		update_rq_clock(rq);
2307	prev->sched_class->put_prev_task(rq, prev);
2308}
2309
2310/*
2311 * Pick up the highest-prio task:
2312 */
2313static inline struct task_struct *
2314pick_next_task(struct rq *rq)
2315{
2316	const struct sched_class *class;
2317	struct task_struct *p;
2318
2319	/*
2320	 * Optimization: we know that if all tasks are in
2321	 * the fair class we can call that function directly:
2322	 */
2323	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
2324		p = fair_sched_class.pick_next_task(rq);
2325		if (likely(p))
2326			return p;
2327	}
2328
2329	for_each_class(class) {
2330		p = class->pick_next_task(rq);
2331		if (p)
2332			return p;
2333	}
2334
2335	BUG(); /* the idle class will always have a runnable task */
2336}
2337
2338/*
2339 * __schedule() is the main scheduler function.
2340 *
2341 * The main means of driving the scheduler and thus entering this function are:
2342 *
2343 *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
2344 *
2345 *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
2346 *      paths. For example, see arch/x86/entry_64.S.
2347 *
2348 *      To drive preemption between tasks, the scheduler sets the flag in timer
2349 *      interrupt handler scheduler_tick().
2350 *
2351 *   3. Wakeups don't really cause entry into schedule(). They add a
2352 *      task to the run-queue and that's it.
2353 *
2354 *      Now, if the new task added to the run-queue preempts the current
2355 *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
2356 *      called on the nearest possible occasion:
2357 *
2358 *       - If the kernel is preemptible (CONFIG_PREEMPT=y):
2359 *
2360 *         - in syscall or exception context, at the next outmost
2361 *           preempt_enable(). (this might be as soon as the wake_up()'s
2362 *           spin_unlock()!)
2363 *
2364 *         - in IRQ context, return from interrupt-handler to
2365 *           preemptible context
2366 *
2367 *       - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
2368 *         then at the next:
2369 *
2370 *          - cond_resched() call
2371 *          - explicit schedule() call
2372 *          - return from syscall or exception to user-space
2373 *          - return from interrupt-handler to user-space
2374 */
2375static void __sched __schedule(void)
2376{
2377	struct task_struct *prev, *next;
2378	unsigned long *switch_count;
2379	struct rq *rq;
2380	int cpu;
2381
2382need_resched:
2383	preempt_disable();
2384	cpu = smp_processor_id();
2385	rq = cpu_rq(cpu);
2386	rcu_note_context_switch(cpu);
2387	prev = rq->curr;
2388
2389	schedule_debug(prev);
2390
2391	if (sched_feat(HRTICK))
2392		hrtick_clear(rq);
2393
2394	/*
2395	 * Make sure that signal_pending_state()->signal_pending() below
2396	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
2397	 * done by the caller to avoid the race with signal_wake_up().
2398	 */
2399	smp_mb__before_spinlock();
2400	raw_spin_lock_irq(&rq->lock);
2401
2402	switch_count = &prev->nivcsw;
2403	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
2404		if (unlikely(signal_pending_state(prev->state, prev))) {
2405			prev->state = TASK_RUNNING;
2406		} else {
2407			deactivate_task(rq, prev, DEQUEUE_SLEEP);
2408			prev->on_rq = 0;
2409
2410			/*
2411			 * If a worker went to sleep, notify and ask workqueue
2412			 * whether it wants to wake up a task to maintain
2413			 * concurrency.
2414			 */
2415			if (prev->flags & PF_WQ_WORKER) {
2416				struct task_struct *to_wakeup;
2417
2418				to_wakeup = wq_worker_sleeping(prev, cpu);
2419				if (to_wakeup)
2420					try_to_wake_up_local(to_wakeup);
2421			}
2422		}
2423		switch_count = &prev->nvcsw;
2424	}
2425
2426	pre_schedule(rq, prev);
2427
2428	if (unlikely(!rq->nr_running))
2429		idle_balance(cpu, rq);
2430
2431	put_prev_task(rq, prev);
2432	next = pick_next_task(rq);
2433	clear_tsk_need_resched(prev);
2434	rq->skip_clock_update = 0;
2435
2436	if (likely(prev != next)) {
2437		rq->nr_switches++;
2438		rq->curr = next;
2439		++*switch_count;
2440
2441		context_switch(rq, prev, next); /* unlocks the rq */
2442		/*
2443		 * The context switch have flipped the stack from under us
2444		 * and restored the local variables which were saved when
2445		 * this task called schedule() in the past. prev == current
2446		 * is still correct, but it can be moved to another cpu/rq.
2447		 */
2448		cpu = smp_processor_id();
2449		rq = cpu_rq(cpu);
2450	} else
2451		raw_spin_unlock_irq(&rq->lock);
2452
2453	post_schedule(rq);
2454
2455	sched_preempt_enable_no_resched();
2456	if (need_resched())
2457		goto need_resched;
2458}
2459
2460static inline void sched_submit_work(struct task_struct *tsk)
2461{
2462	if (!tsk->state || tsk_is_pi_blocked(tsk))
2463		return;
2464	/*
2465	 * If we are going to sleep and we have plugged IO queued,
2466	 * make sure to submit it to avoid deadlocks.
2467	 */
2468	if (blk_needs_flush_plug(tsk))
2469		blk_schedule_flush_plug(tsk);
2470}
2471
2472asmlinkage void __sched schedule(void)
2473{
2474	struct task_struct *tsk = current;
2475
2476	sched_submit_work(tsk);
2477	__schedule();
2478}
2479EXPORT_SYMBOL(schedule);
2480
2481#ifdef CONFIG_CONTEXT_TRACKING
2482asmlinkage void __sched schedule_user(void)
2483{
2484	/*
2485	 * If we come here after a random call to set_need_resched(),
2486	 * or we have been woken up remotely but the IPI has not yet arrived,
2487	 * we haven't yet exited the RCU idle mode. Do it here manually until
2488	 * we find a better solution.
2489	 */
2490	user_exit();
2491	schedule();
2492	user_enter();
2493}
2494#endif
2495
2496/**
2497 * schedule_preempt_disabled - called with preemption disabled
2498 *
2499 * Returns with preemption disabled. Note: preempt_count must be 1
2500 */
2501void __sched schedule_preempt_disabled(void)
2502{
2503	sched_preempt_enable_no_resched();
2504	schedule();
2505	preempt_disable();
2506}
2507
2508#ifdef CONFIG_PREEMPT
2509/*
2510 * this is the entry point to schedule() from in-kernel preemption
2511 * off of preempt_enable. Kernel preemptions off return from interrupt
2512 * occur there and call schedule directly.
2513 */
2514asmlinkage void __sched notrace preempt_schedule(void)
2515{
2516	/*
2517	 * If there is a non-zero preempt_count or interrupts are disabled,
2518	 * we do not want to preempt the current task. Just return..
2519	 */
2520	if (likely(!preemptible()))
2521		return;
2522
2523	do {
2524		add_preempt_count_notrace(PREEMPT_ACTIVE);
2525		__schedule();
2526		sub_preempt_count_notrace(PREEMPT_ACTIVE);
2527
2528		/*
2529		 * Check again in case we missed a preemption opportunity
2530		 * between schedule and now.
2531		 */
2532		barrier();
2533	} while (need_resched());
2534}
2535EXPORT_SYMBOL(preempt_schedule);
2536
2537/*
2538 * this is the entry point to schedule() from kernel preemption
2539 * off of irq context.
2540 * Note, that this is called and return with irqs disabled. This will
2541 * protect us against recursive calling from irq.
2542 */
2543asmlinkage void __sched preempt_schedule_irq(void)
2544{
2545	struct thread_info *ti = current_thread_info();
2546	enum ctx_state prev_state;
2547
2548	/* Catch callers which need to be fixed */
2549	BUG_ON(ti->preempt_count || !irqs_disabled());
2550
2551	prev_state = exception_enter();
2552
2553	do {
2554		add_preempt_count(PREEMPT_ACTIVE);
2555		local_irq_enable();
2556		__schedule();
2557		local_irq_disable();
2558		sub_preempt_count(PREEMPT_ACTIVE);
2559
2560		/*
2561		 * Check again in case we missed a preemption opportunity
2562		 * between schedule and now.
2563		 */
2564		barrier();
2565	} while (need_resched());
2566
2567	exception_exit(prev_state);
2568}
2569
2570#endif /* CONFIG_PREEMPT */
2571
2572int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
2573			  void *key)
2574{
2575	return try_to_wake_up(curr->private, mode, wake_flags);
2576}
2577EXPORT_SYMBOL(default_wake_function);
2578
2579/*
2580 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
2581 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
2582 * number) then we wake all the non-exclusive tasks and one exclusive task.
2583 *
2584 * There are circumstances in which we can try to wake a task which has already
2585 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
2586 * zero in this (rare) case, and we handle it by continuing to scan the queue.
2587 */
2588static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
2589			int nr_exclusive, int wake_flags, void *key)
2590{
2591	wait_queue_t *curr, *next;
2592
2593	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
2594		unsigned flags = curr->flags;
2595
2596		if (curr->func(curr, mode, wake_flags, key) &&
2597				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
2598			break;
2599	}
2600}
2601
2602/**
2603 * __wake_up - wake up threads blocked on a waitqueue.
2604 * @q: the waitqueue
2605 * @mode: which threads
2606 * @nr_exclusive: how many wake-one or wake-many threads to wake up
2607 * @key: is directly passed to the wakeup function
2608 *
2609 * It may be assumed that this function implies a write memory barrier before
2610 * changing the task state if and only if any tasks are woken up.
2611 */
2612void __wake_up(wait_queue_head_t *q, unsigned int mode,
2613			int nr_exclusive, void *key)
2614{
2615	unsigned long flags;
2616
2617	spin_lock_irqsave(&q->lock, flags);
2618	__wake_up_common(q, mode, nr_exclusive, 0, key);
2619	spin_unlock_irqrestore(&q->lock, flags);
2620}
2621EXPORT_SYMBOL(__wake_up);
2622
2623/*
2624 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
2625 */
2626void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
2627{
2628	__wake_up_common(q, mode, nr, 0, NULL);
2629}
2630EXPORT_SYMBOL_GPL(__wake_up_locked);
2631
2632void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
2633{
2634	__wake_up_common(q, mode, 1, 0, key);
2635}
2636EXPORT_SYMBOL_GPL(__wake_up_locked_key);
2637
2638/**
2639 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
2640 * @q: the waitqueue
2641 * @mode: which threads
2642 * @nr_exclusive: how many wake-one or wake-many threads to wake up
2643 * @key: opaque value to be passed to wakeup targets
2644 *
2645 * The sync wakeup differs that the waker knows that it will schedule
2646 * away soon, so while the target thread will be woken up, it will not
2647 * be migrated to another CPU - ie. the two threads are 'synchronized'
2648 * with each other. This can prevent needless bouncing between CPUs.
2649 *
2650 * On UP it can prevent extra preemption.
2651 *
2652 * It may be assumed that this function implies a write memory barrier before
2653 * changing the task state if and only if any tasks are woken up.
2654 */
2655void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
2656			int nr_exclusive, void *key)
2657{
2658	unsigned long flags;
2659	int wake_flags = WF_SYNC;
2660
2661	if (unlikely(!q))
2662		return;
2663
2664	if (unlikely(nr_exclusive != 1))
2665		wake_flags = 0;
2666
2667	spin_lock_irqsave(&q->lock, flags);
2668	__wake_up_common(q, mode, nr_exclusive, wake_flags, key);
2669	spin_unlock_irqrestore(&q->lock, flags);
2670}
2671EXPORT_SYMBOL_GPL(__wake_up_sync_key);
2672
2673/*
2674 * __wake_up_sync - see __wake_up_sync_key()
2675 */
2676void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
2677{
2678	__wake_up_sync_key(q, mode, nr_exclusive, NULL);
2679}
2680EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
2681
2682/**
2683 * complete: - signals a single thread waiting on this completion
2684 * @x:  holds the state of this particular completion
2685 *
2686 * This will wake up a single thread waiting on this completion. Threads will be
2687 * awakened in the same order in which they were queued.
2688 *
2689 * See also complete_all(), wait_for_completion() and related routines.
2690 *
2691 * It may be assumed that this function implies a write memory barrier before
2692 * changing the task state if and only if any tasks are woken up.
2693 */
2694void complete(struct completion *x)
2695{
2696	unsigned long flags;
2697
2698	spin_lock_irqsave(&x->wait.lock, flags);
2699	x->done++;
2700	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
2701	spin_unlock_irqrestore(&x->wait.lock, flags);
2702}
2703EXPORT_SYMBOL(complete);
2704
2705/**
2706 * complete_all: - signals all threads waiting on this completion
2707 * @x:  holds the state of this particular completion
2708 *
2709 * This will wake up all threads waiting on this particular completion event.
2710 *
2711 * It may be assumed that this function implies a write memory barrier before
2712 * changing the task state if and only if any tasks are woken up.
2713 */
2714void complete_all(struct completion *x)
2715{
2716	unsigned long flags;
2717
2718	spin_lock_irqsave(&x->wait.lock, flags);
2719	x->done += UINT_MAX/2;
2720	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
2721	spin_unlock_irqrestore(&x->wait.lock, flags);
2722}
2723EXPORT_SYMBOL(complete_all);
2724
2725static inline long __sched
2726do_wait_for_common(struct completion *x,
2727		   long (*action)(long), long timeout, int state)
2728{
2729	if (!x->done) {
2730		DECLARE_WAITQUEUE(wait, current);
2731
2732		__add_wait_queue_tail_exclusive(&x->wait, &wait);
2733		do {
2734			if (signal_pending_state(state, current)) {
2735				timeout = -ERESTARTSYS;
2736				break;
2737			}
2738			__set_current_state(state);
2739			spin_unlock_irq(&x->wait.lock);
2740			timeout = action(timeout);
2741			spin_lock_irq(&x->wait.lock);
2742		} while (!x->done && timeout);
2743		__remove_wait_queue(&x->wait, &wait);
2744		if (!x->done)
2745			return timeout;
2746	}
2747	x->done--;
2748	return timeout ?: 1;
2749}
2750
2751static inline long __sched
2752__wait_for_common(struct completion *x,
2753		  long (*action)(long), long timeout, int state)
2754{
2755	might_sleep();
2756
2757	spin_lock_irq(&x->wait.lock);
2758	timeout = do_wait_for_common(x, action, timeout, state);
2759	spin_unlock_irq(&x->wait.lock);
2760	return timeout;
2761}
2762
2763static long __sched
2764wait_for_common(struct completion *x, long timeout, int state)
2765{
2766	return __wait_for_common(x, schedule_timeout, timeout, state);
2767}
2768
2769static long __sched
2770wait_for_common_io(struct completion *x, long timeout, int state)
2771{
2772	return __wait_for_common(x, io_schedule_timeout, timeout, state);
2773}
2774
2775/**
2776 * wait_for_completion: - waits for completion of a task
2777 * @x:  holds the state of this particular completion
2778 *
2779 * This waits to be signaled for completion of a specific task. It is NOT
2780 * interruptible and there is no timeout.
2781 *
2782 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
2783 * and interrupt capability. Also see complete().
2784 */
2785void __sched wait_for_completion(struct completion *x)
2786{
2787	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
2788}
2789EXPORT_SYMBOL(wait_for_completion);
2790
2791/**
2792 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
2793 * @x:  holds the state of this particular completion
2794 * @timeout:  timeout value in jiffies
2795 *
2796 * This waits for either a completion of a specific task to be signaled or for a
2797 * specified timeout to expire. The timeout is in jiffies. It is not
2798 * interruptible.
2799 *
2800 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
2801 * till timeout) if completed.
2802 */
2803unsigned long __sched
2804wait_for_completion_timeout(struct completion *x, unsigned long timeout)
2805{
2806	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
2807}
2808EXPORT_SYMBOL(wait_for_completion_timeout);
2809
2810/**
2811 * wait_for_completion_io: - waits for completion of a task
2812 * @x:  holds the state of this particular completion
2813 *
2814 * This waits to be signaled for completion of a specific task. It is NOT
2815 * interruptible and there is no timeout. The caller is accounted as waiting
2816 * for IO.
2817 */
2818void __sched wait_for_completion_io(struct completion *x)
2819{
2820	wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
2821}
2822EXPORT_SYMBOL(wait_for_completion_io);
2823
2824/**
2825 * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
2826 * @x:  holds the state of this particular completion
2827 * @timeout:  timeout value in jiffies
2828 *
2829 * This waits for either a completion of a specific task to be signaled or for a
2830 * specified timeout to expire. The timeout is in jiffies. It is not
2831 * interruptible. The caller is accounted as waiting for IO.
2832 *
2833 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
2834 * till timeout) if completed.
2835 */
2836unsigned long __sched
2837wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
2838{
2839	return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
2840}
2841EXPORT_SYMBOL(wait_for_completion_io_timeout);
2842
2843/**
2844 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
2845 * @x:  holds the state of this particular completion
2846 *
2847 * This waits for completion of a specific task to be signaled. It is
2848 * interruptible.
2849 *
2850 * Return: -ERESTARTSYS if interrupted, 0 if completed.
2851 */
2852int __sched wait_for_completion_interruptible(struct completion *x)
2853{
2854	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
2855	if (t == -ERESTARTSYS)
2856		return t;
2857	return 0;
2858}
2859EXPORT_SYMBOL(wait_for_completion_interruptible);
2860
2861/**
2862 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
2863 * @x:  holds the state of this particular completion
2864 * @timeout:  timeout value in jiffies
2865 *
2866 * This waits for either a completion of a specific task to be signaled or for a
2867 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
2868 *
2869 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
2870 * or number of jiffies left till timeout) if completed.
2871 */
2872long __sched
2873wait_for_completion_interruptible_timeout(struct completion *x,
2874					  unsigned long timeout)
2875{
2876	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
2877}
2878EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
2879
2880/**
2881 * wait_for_completion_killable: - waits for completion of a task (killable)
2882 * @x:  holds the state of this particular completion
2883 *
2884 * This waits to be signaled for completion of a specific task. It can be
2885 * interrupted by a kill signal.
2886 *
2887 * Return: -ERESTARTSYS if interrupted, 0 if completed.
2888 */
2889int __sched wait_for_completion_killable(struct completion *x)
2890{
2891	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
2892	if (t == -ERESTARTSYS)
2893		return t;
2894	return 0;
2895}
2896EXPORT_SYMBOL(wait_for_completion_killable);
2897
2898/**
2899 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
2900 * @x:  holds the state of this particular completion
2901 * @timeout:  timeout value in jiffies
2902 *
2903 * This waits for either a completion of a specific task to be
2904 * signaled or for a specified timeout to expire. It can be
2905 * interrupted by a kill signal. The timeout is in jiffies.
2906 *
2907 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
2908 * or number of jiffies left till timeout) if completed.
2909 */
2910long __sched
2911wait_for_completion_killable_timeout(struct completion *x,
2912				     unsigned long timeout)
2913{
2914	return wait_for_common(x, timeout, TASK_KILLABLE);
2915}
2916EXPORT_SYMBOL(wait_for_completion_killable_timeout);
2917
2918/**
2919 *	try_wait_for_completion - try to decrement a completion without blocking
2920 *	@x:	completion structure
2921 *
2922 *	Return: 0 if a decrement cannot be done without blocking
2923 *		 1 if a decrement succeeded.
2924 *
2925 *	If a completion is being used as a counting completion,
2926 *	attempt to decrement the counter without blocking. This
2927 *	enables us to avoid waiting if the resource the completion
2928 *	is protecting is not available.
2929 */
2930bool try_wait_for_completion(struct completion *x)
2931{
2932	unsigned long flags;
2933	int ret = 1;
2934
2935	spin_lock_irqsave(&x->wait.lock, flags);
2936	if (!x->done)
2937		ret = 0;
2938	else
2939		x->done--;
2940	spin_unlock_irqrestore(&x->wait.lock, flags);
2941	return ret;
2942}
2943EXPORT_SYMBOL(try_wait_for_completion);
2944
2945/**
2946 *	completion_done - Test to see if a completion has any waiters
2947 *	@x:	completion structure
2948 *
2949 *	Return: 0 if there are waiters (wait_for_completion() in progress)
2950 *		 1 if there are no waiters.
2951 *
2952 */
2953bool completion_done(struct completion *x)
2954{
2955	unsigned long flags;
2956	int ret = 1;
2957
2958	spin_lock_irqsave(&x->wait.lock, flags);
2959	if (!x->done)
2960		ret = 0;
2961	spin_unlock_irqrestore(&x->wait.lock, flags);
2962	return ret;
2963}
2964EXPORT_SYMBOL(completion_done);
2965
2966static long __sched
2967sleep_on_common(wait_queue_head_t *q, int state, long timeout)
2968{
2969	unsigned long flags;
2970	wait_queue_t wait;
2971
2972	init_waitqueue_entry(&wait, current);
2973
2974	__set_current_state(state);
2975
2976	spin_lock_irqsave(&q->lock, flags);
2977	__add_wait_queue(q, &wait);
2978	spin_unlock(&q->lock);
2979	timeout = schedule_timeout(timeout);
2980	spin_lock_irq(&q->lock);
2981	__remove_wait_queue(q, &wait);
2982	spin_unlock_irqrestore(&q->lock, flags);
2983
2984	return timeout;
2985}
2986
2987void __sched interruptible_sleep_on(wait_queue_head_t *q)
2988{
2989	sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2990}
2991EXPORT_SYMBOL(interruptible_sleep_on);
2992
2993long __sched
2994interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
2995{
2996	return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
2997}
2998EXPORT_SYMBOL(interruptible_sleep_on_timeout);
2999
3000void __sched sleep_on(wait_queue_head_t *q)
3001{
3002	sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
3003}
3004EXPORT_SYMBOL(sleep_on);
3005
3006long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
3007{
3008	return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
3009}
3010EXPORT_SYMBOL(sleep_on_timeout);
3011
3012#ifdef CONFIG_RT_MUTEXES
3013
3014/*
3015 * rt_mutex_setprio - set the current priority of a task
3016 * @p: task
3017 * @prio: prio value (kernel-internal form)
3018 *
3019 * This function changes the 'effective' priority of a task. It does
3020 * not touch ->normal_prio like __setscheduler().
3021 *
3022 * Used by the rt_mutex code to implement priority inheritance logic.
3023 */
3024void rt_mutex_setprio(struct task_struct *p, int prio)
3025{
3026	int oldprio, on_rq, running;
3027	struct rq *rq;
3028	const struct sched_class *prev_class;
3029
3030	BUG_ON(prio < 0 || prio > MAX_PRIO);
3031
3032	rq = __task_rq_lock(p);
3033
3034	/*
3035	 * Idle task boosting is a nono in general. There is one
3036	 * exception, when PREEMPT_RT and NOHZ is active:
3037	 *
3038	 * The idle task calls get_next_timer_interrupt() and holds
3039	 * the timer wheel base->lock on the CPU and another CPU wants
3040	 * to access the timer (probably to cancel it). We can safely
3041	 * ignore the boosting request, as the idle CPU runs this code
3042	 * with interrupts disabled and will complete the lock
3043	 * protected section without being interrupted. So there is no
3044	 * real need to boost.
3045	 */
3046	if (unlikely(p == rq->idle)) {
3047		WARN_ON(p != rq->curr);
3048		WARN_ON(p->pi_blocked_on);
3049		goto out_unlock;
3050	}
3051
3052	trace_sched_pi_setprio(p, prio);
3053	oldprio = p->prio;
3054	prev_class = p->sched_class;
3055	on_rq = p->on_rq;
3056	running = task_current(rq, p);
3057	if (on_rq)
3058		dequeue_task(rq, p, 0);
3059	if (running)
3060		p->sched_class->put_prev_task(rq, p);
3061
3062	if (rt_prio(prio))
3063		p->sched_class = &rt_sched_class;
3064	else
3065		p->sched_class = &fair_sched_class;
3066
3067	p->prio = prio;
3068
3069	if (running)
3070		p->sched_class->set_curr_task(rq);
3071	if (on_rq)
3072		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
3073
3074	check_class_changed(rq, p, prev_class, oldprio);
3075out_unlock:
3076	__task_rq_unlock(rq);
3077}
3078#endif
3079void set_user_nice(struct task_struct *p, long nice)
3080{
3081	int old_prio, delta, on_rq;
3082	unsigned long flags;
3083	struct rq *rq;
3084
3085	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
3086		return;
3087	/*
3088	 * We have to be careful, if called from sys_setpriority(),
3089	 * the task might be in the middle of scheduling on another CPU.
3090	 */
3091	rq = task_rq_lock(p, &flags);
3092	/*
3093	 * The RT priorities are set via sched_setscheduler(), but we still
3094	 * allow the 'normal' nice value to be set - but as expected
3095	 * it wont have any effect on scheduling until the task is
3096	 * SCHED_FIFO/SCHED_RR:
3097	 */
3098	if (task_has_rt_policy(p)) {
3099		p->static_prio = NICE_TO_PRIO(nice);
3100		goto out_unlock;
3101	}
3102	on_rq = p->on_rq;
3103	if (on_rq)
3104		dequeue_task(rq, p, 0);
3105
3106	p->static_prio = NICE_TO_PRIO(nice);
3107	set_load_weight(p);
3108	old_prio = p->prio;
3109	p->prio = effective_prio(p);
3110	delta = p->prio - old_prio;
3111
3112	if (on_rq) {
3113		enqueue_task(rq, p, 0);
3114		/*
3115		 * If the task increased its priority or is running and
3116		 * lowered its priority, then reschedule its CPU:
3117		 */
3118		if (delta < 0 || (delta > 0 && task_running(rq, p)))
3119			resched_task(rq->curr);
3120	}
3121out_unlock:
3122	task_rq_unlock(rq, p, &flags);
3123}
3124EXPORT_SYMBOL(set_user_nice);
3125
3126/*
3127 * can_nice - check if a task can reduce its nice value
3128 * @p: task
3129 * @nice: nice value
3130 */
3131int can_nice(const struct task_struct *p, const int nice)
3132{
3133	/* convert nice value [19,-20] to rlimit style value [1,40] */
3134	int nice_rlim = 20 - nice;
3135
3136	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
3137		capable(CAP_SYS_NICE));
3138}
3139
3140#ifdef __ARCH_WANT_SYS_NICE
3141
3142/*
3143 * sys_nice - change the priority of the current process.
3144 * @increment: priority increment
3145 *
3146 * sys_setpriority is a more generic, but much slower function that
3147 * does similar things.
3148 */
3149SYSCALL_DEFINE1(nice, int, increment)
3150{
3151	long nice, retval;
3152
3153	/*
3154	 * Setpriority might change our priority at the same moment.
3155	 * We don't have to worry. Conceptually one call occurs first
3156	 * and we have a single winner.
3157	 */
3158	if (increment < -40)
3159		increment = -40;
3160	if (increment > 40)
3161		increment = 40;
3162
3163	nice = TASK_NICE(current) + increment;
3164	if (nice < -20)
3165		nice = -20;
3166	if (nice > 19)
3167		nice = 19;
3168
3169	if (increment < 0 && !can_nice(current, nice))
3170		return -EPERM;
3171
3172	retval = security_task_setnice(current, nice);
3173	if (retval)
3174		return retval;
3175
3176	set_user_nice(current, nice);
3177	return 0;
3178}
3179
3180#endif
3181
3182/**
3183 * task_prio - return the priority value of a given task.
3184 * @p: the task in question.
3185 *
3186 * Return: The priority value as seen by users in /proc.
3187 * RT tasks are offset by -200. Normal tasks are centered
3188 * around 0, value goes from -16 to +15.
3189 */
3190int task_prio(const struct task_struct *p)
3191{
3192	return p->prio - MAX_RT_PRIO;
3193}
3194
3195/**
3196 * task_nice - return the nice value of a given task.
3197 * @p: the task in question.
3198 *
3199 * Return: The nice value [ -20 ... 0 ... 19 ].
3200 */
3201int task_nice(const struct task_struct *p)
3202{
3203	return TASK_NICE(p);
3204}
3205EXPORT_SYMBOL(task_nice);
3206
3207/**
3208 * idle_cpu - is a given cpu idle currently?
3209 * @cpu: the processor in question.
3210 *
3211 * Return: 1 if the CPU is currently idle. 0 otherwise.
3212 */
3213int idle_cpu(int cpu)
3214{
3215	struct rq *rq = cpu_rq(cpu);
3216
3217	if (rq->curr != rq->idle)
3218		return 0;
3219
3220	if (rq->nr_running)
3221		return 0;
3222
3223#ifdef CONFIG_SMP
3224	if (!llist_empty(&rq->wake_list))
3225		return 0;
3226#endif
3227
3228	return 1;
3229}
3230
3231/**
3232 * idle_task - return the idle task for a given cpu.
3233 * @cpu: the processor in question.
3234 *
3235 * Return: The idle task for the cpu @cpu.
3236 */
3237struct task_struct *idle_task(int cpu)
3238{
3239	return cpu_rq(cpu)->idle;
3240}
3241
3242/**
3243 * find_process_by_pid - find a process with a matching PID value.
3244 * @pid: the pid in question.
3245 *
3246 * The task of @pid, if found. %NULL otherwise.
3247 */
3248static struct task_struct *find_process_by_pid(pid_t pid)
3249{
3250	return pid ? find_task_by_vpid(pid) : current;
3251}
3252
3253/* Actually do priority change: must hold rq lock. */
3254static void
3255__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
3256{
3257	p->policy = policy;
3258	p->rt_priority = prio;
3259	p->normal_prio = normal_prio(p);
3260	/* we are holding p->pi_lock already */
3261	p->prio = rt_mutex_getprio(p);
3262	if (rt_prio(p->prio))
3263		p->sched_class = &rt_sched_class;
3264	else
3265		p->sched_class = &fair_sched_class;
3266	set_load_weight(p);
3267}
3268
3269/*
3270 * check the target process has a UID that matches the current process's
3271 */
3272static bool check_same_owner(struct task_struct *p)
3273{
3274	const struct cred *cred = current_cred(), *pcred;
3275	bool match;
3276
3277	rcu_read_lock();
3278	pcred = __task_cred(p);
3279	match = (uid_eq(cred->euid, pcred->euid) ||
3280		 uid_eq(cred->euid, pcred->uid));
3281	rcu_read_unlock();
3282	return match;
3283}
3284
3285static int __sched_setscheduler(struct task_struct *p, int policy,
3286				const struct sched_param *param, bool user)
3287{
3288	int retval, oldprio, oldpolicy = -1, on_rq, running;
3289	unsigned long flags;
3290	const struct sched_class *prev_class;
3291	struct rq *rq;
3292	int reset_on_fork;
3293
3294	/* may grab non-irq protected spin_locks */
3295	BUG_ON(in_interrupt());
3296recheck:
3297	/* double check policy once rq lock held */
3298	if (policy < 0) {
3299		reset_on_fork = p->sched_reset_on_fork;
3300		policy = oldpolicy = p->policy;
3301	} else {
3302		reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
3303		policy &= ~SCHED_RESET_ON_FORK;
3304
3305		if (policy != SCHED_FIFO && policy != SCHED_RR &&
3306				policy != SCHED_NORMAL && policy != SCHED_BATCH &&
3307				policy != SCHED_IDLE)
3308			return -EINVAL;
3309	}
3310
3311	/*
3312	 * Valid priorities for SCHED_FIFO and SCHED_RR are
3313	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
3314	 * SCHED_BATCH and SCHED_IDLE is 0.
3315	 */
3316	if (param->sched_priority < 0 ||
3317	    (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
3318	    (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
3319		return -EINVAL;
3320	if (rt_policy(policy) != (param->sched_priority != 0))
3321		return -EINVAL;
3322
3323	/*
3324	 * Allow unprivileged RT tasks to decrease priority:
3325	 */
3326	if (user && !capable(CAP_SYS_NICE)) {
3327		if (rt_policy(policy)) {
3328			unsigned long rlim_rtprio =
3329					task_rlimit(p, RLIMIT_RTPRIO);
3330
3331			/* can't set/change the rt policy */
3332			if (policy != p->policy && !rlim_rtprio)
3333				return -EPERM;
3334
3335			/* can't increase priority */
3336			if (param->sched_priority > p->rt_priority &&
3337			    param->sched_priority > rlim_rtprio)
3338				return -EPERM;
3339		}
3340
3341		/*
3342		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
3343		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
3344		 */
3345		if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
3346			if (!can_nice(p, TASK_NICE(p)))
3347				return -EPERM;
3348		}
3349
3350		/* can't change other user's priorities */
3351		if (!check_same_owner(p))
3352			return -EPERM;
3353
3354		/* Normal users shall not reset the sched_reset_on_fork flag */
3355		if (p->sched_reset_on_fork && !reset_on_fork)
3356			return -EPERM;
3357	}
3358
3359	if (user) {
3360		retval = security_task_setscheduler(p);
3361		if (retval)
3362			return retval;
3363	}
3364
3365	/*
3366	 * make sure no PI-waiters arrive (or leave) while we are
3367	 * changing the priority of the task:
3368	 *
3369	 * To be able to change p->policy safely, the appropriate
3370	 * runqueue lock must be held.
3371	 */
3372	rq = task_rq_lock(p, &flags);
3373
3374	/*
3375	 * Changing the policy of the stop threads its a very bad idea
3376	 */
3377	if (p == rq->stop) {
3378		task_rq_unlock(rq, p, &flags);
3379		return -EINVAL;
3380	}
3381
3382	/*
3383	 * If not changing anything there's no need to proceed further:
3384	 */
3385	if (unlikely(policy == p->policy && (!rt_policy(policy) ||
3386			param->sched_priority == p->rt_priority))) {
3387		task_rq_unlock(rq, p, &flags);
3388		return 0;
3389	}
3390
3391#ifdef CONFIG_RT_GROUP_SCHED
3392	if (user) {
3393		/*
3394		 * Do not allow realtime tasks into groups that have no runtime
3395		 * assigned.
3396		 */
3397		if (rt_bandwidth_enabled() && rt_policy(policy) &&
3398				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
3399				!task_group_is_autogroup(task_group(p))) {
3400			task_rq_unlock(rq, p, &flags);
3401			return -EPERM;
3402		}
3403	}
3404#endif
3405
3406	/* recheck policy now with rq lock held */
3407	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
3408		policy = oldpolicy = -1;
3409		task_rq_unlock(rq, p, &flags);
3410		goto recheck;
3411	}
3412	on_rq = p->on_rq;
3413	running = task_current(rq, p);
3414	if (on_rq)
3415		dequeue_task(rq, p, 0);
3416	if (running)
3417		p->sched_class->put_prev_task(rq, p);
3418
3419	p->sched_reset_on_fork = reset_on_fork;
3420
3421	oldprio = p->prio;
3422	prev_class = p->sched_class;
3423	__setscheduler(rq, p, policy, param->sched_priority);
3424
3425	if (running)
3426		p->sched_class->set_curr_task(rq);
3427	if (on_rq)
3428		enqueue_task(rq, p, 0);
3429
3430	check_class_changed(rq, p, prev_class, oldprio);
3431	task_rq_unlock(rq, p, &flags);
3432
3433	rt_mutex_adjust_pi(p);
3434
3435	return 0;
3436}
3437
3438/**
3439 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
3440 * @p: the task in question.
3441 * @policy: new policy.
3442 * @param: structure containing the new RT priority.
3443 *
3444 * Return: 0 on success. An error code otherwise.
3445 *
3446 * NOTE that the task may be already dead.
3447 */
3448int sched_setscheduler(struct task_struct *p, int policy,
3449		       const struct sched_param *param)
3450{
3451	return __sched_setscheduler(p, policy, param, true);
3452}
3453EXPORT_SYMBOL_GPL(sched_setscheduler);
3454
3455/**
3456 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
3457 * @p: the task in question.
3458 * @policy: new policy.
3459 * @param: structure containing the new RT priority.
3460 *
3461 * Just like sched_setscheduler, only don't bother checking if the
3462 * current context has permission.  For example, this is needed in
3463 * stop_machine(): we create temporary high priority worker threads,
3464 * but our caller might not have that capability.
3465 *
3466 * Return: 0 on success. An error code otherwise.
3467 */
3468int sched_setscheduler_nocheck(struct task_struct *p, int policy,
3469			       const struct sched_param *param)
3470{
3471	return __sched_setscheduler(p, policy, param, false);
3472}
3473
3474static int
3475do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
3476{
3477	struct sched_param lparam;
3478	struct task_struct *p;
3479	int retval;
3480
3481	if (!param || pid < 0)
3482		return -EINVAL;
3483	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
3484		return -EFAULT;
3485
3486	rcu_read_lock();
3487	retval = -ESRCH;
3488	p = find_process_by_pid(pid);
3489	if (p != NULL)
3490		retval = sched_setscheduler(p, policy, &lparam);
3491	rcu_read_unlock();
3492
3493	return retval;
3494}
3495
3496/**
3497 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
3498 * @pid: the pid in question.
3499 * @policy: new policy.
3500 * @param: structure containing the new RT priority.
3501 *
3502 * Return: 0 on success. An error code otherwise.
3503 */
3504SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
3505		struct sched_param __user *, param)
3506{
3507	/* negative values for policy are not valid */
3508	if (policy < 0)
3509		return -EINVAL;
3510
3511	return do_sched_setscheduler(pid, policy, param);
3512}
3513
3514/**
3515 * sys_sched_setparam - set/change the RT priority of a thread
3516 * @pid: the pid in question.
3517 * @param: structure containing the new RT priority.
3518 *
3519 * Return: 0 on success. An error code otherwise.
3520 */
3521SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
3522{
3523	return do_sched_setscheduler(pid, -1, param);
3524}
3525
3526/**
3527 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
3528 * @pid: the pid in question.
3529 *
3530 * Return: On success, the policy of the thread. Otherwise, a negative error
3531 * code.
3532 */
3533SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
3534{
3535	struct task_struct *p;
3536	int retval;
3537
3538	if (pid < 0)
3539		return -EINVAL;
3540
3541	retval = -ESRCH;
3542	rcu_read_lock();
3543	p = find_process_by_pid(pid);
3544	if (p) {
3545		retval = security_task_getscheduler(p);
3546		if (!retval)
3547			retval = p->policy
3548				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
3549	}
3550	rcu_read_unlock();
3551	return retval;
3552}
3553
3554/**
3555 * sys_sched_getparam - get the RT priority of a thread
3556 * @pid: the pid in question.
3557 * @param: structure containing the RT priority.
3558 *
3559 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
3560 * code.
3561 */
3562SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
3563{
3564	struct sched_param lp;
3565	struct task_struct *p;
3566	int retval;
3567
3568	if (!param || pid < 0)
3569		return -EINVAL;
3570
3571	rcu_read_lock();
3572	p = find_process_by_pid(pid);
3573	retval = -ESRCH;
3574	if (!p)
3575		goto out_unlock;
3576
3577	retval = security_task_getscheduler(p);
3578	if (retval)
3579		goto out_unlock;
3580
3581	lp.sched_priority = p->rt_priority;
3582	rcu_read_unlock();
3583
3584	/*
3585	 * This one might sleep, we cannot do it with a spinlock held ...
3586	 */
3587	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
3588
3589	return retval;
3590
3591out_unlock:
3592	rcu_read_unlock();
3593	return retval;
3594}
3595
3596long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
3597{
3598	cpumask_var_t cpus_allowed, new_mask;
3599	struct task_struct *p;
3600	int retval;
3601
3602	get_online_cpus();
3603	rcu_read_lock();
3604
3605	p = find_process_by_pid(pid);
3606	if (!p) {
3607		rcu_read_unlock();
3608		put_online_cpus();
3609		return -ESRCH;
3610	}
3611
3612	/* Prevent p going away */
3613	get_task_struct(p);
3614	rcu_read_unlock();
3615
3616	if (p->flags & PF_NO_SETAFFINITY) {
3617		retval = -EINVAL;
3618		goto out_put_task;
3619	}
3620	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
3621		retval = -ENOMEM;
3622		goto out_put_task;
3623	}
3624	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
3625		retval = -ENOMEM;
3626		goto out_free_cpus_allowed;
3627	}
3628	retval = -EPERM;
3629	if (!check_same_owner(p)) {
3630		rcu_read_lock();
3631		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
3632			rcu_read_unlock();
3633			goto out_unlock;
3634		}
3635		rcu_read_unlock();
3636	}
3637
3638	retval = security_task_setscheduler(p);
3639	if (retval)
3640		goto out_unlock;
3641
3642	cpuset_cpus_allowed(p, cpus_allowed);
3643	cpumask_and(new_mask, in_mask, cpus_allowed);
3644again:
3645	retval = set_cpus_allowed_ptr(p, new_mask);
3646
3647	if (!retval) {
3648		cpuset_cpus_allowed(p, cpus_allowed);
3649		if (!cpumask_subset(new_mask, cpus_allowed)) {
3650			/*
3651			 * We must have raced with a concurrent cpuset
3652			 * update. Just reset the cpus_allowed to the
3653			 * cpuset's cpus_allowed
3654			 */
3655			cpumask_copy(new_mask, cpus_allowed);
3656			goto again;
3657		}
3658	}
3659out_unlock:
3660	free_cpumask_var(new_mask);
3661out_free_cpus_allowed:
3662	free_cpumask_var(cpus_allowed);
3663out_put_task:
3664	put_task_struct(p);
3665	put_online_cpus();
3666	return retval;
3667}
3668
3669static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
3670			     struct cpumask *new_mask)
3671{
3672	if (len < cpumask_size())
3673		cpumask_clear(new_mask);
3674	else if (len > cpumask_size())
3675		len = cpumask_size();
3676
3677	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
3678}
3679
3680/**
3681 * sys_sched_setaffinity - set the cpu affinity of a process
3682 * @pid: pid of the process
3683 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3684 * @user_mask_ptr: user-space pointer to the new cpu mask
3685 *
3686 * Return: 0 on success. An error code otherwise.
3687 */
3688SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
3689		unsigned long __user *, user_mask_ptr)
3690{
3691	cpumask_var_t new_mask;
3692	int retval;
3693
3694	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
3695		return -ENOMEM;
3696
3697	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
3698	if (retval == 0)
3699		retval = sched_setaffinity(pid, new_mask);
3700	free_cpumask_var(new_mask);
3701	return retval;
3702}
3703
3704long sched_getaffinity(pid_t pid, struct cpumask *mask)
3705{
3706	struct task_struct *p;
3707	unsigned long flags;
3708	int retval;
3709
3710	get_online_cpus();
3711	rcu_read_lock();
3712
3713	retval = -ESRCH;
3714	p = find_process_by_pid(pid);
3715	if (!p)
3716		goto out_unlock;
3717
3718	retval = security_task_getscheduler(p);
3719	if (retval)
3720		goto out_unlock;
3721
3722	raw_spin_lock_irqsave(&p->pi_lock, flags);
3723	cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
3724	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3725
3726out_unlock:
3727	rcu_read_unlock();
3728	put_online_cpus();
3729
3730	return retval;
3731}
3732
3733/**
3734 * sys_sched_getaffinity - get the cpu affinity of a process
3735 * @pid: pid of the process
3736 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3737 * @user_mask_ptr: user-space pointer to hold the current cpu mask
3738 *
3739 * Return: 0 on success. An error code otherwise.
3740 */
3741SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
3742		unsigned long __user *, user_mask_ptr)
3743{
3744	int ret;
3745	cpumask_var_t mask;
3746
3747	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
3748		return -EINVAL;
3749	if (len & (sizeof(unsigned long)-1))
3750		return -EINVAL;
3751
3752	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
3753		return -ENOMEM;
3754
3755	ret = sched_getaffinity(pid, mask);
3756	if (ret == 0) {
3757		size_t retlen = min_t(size_t, len, cpumask_size());
3758
3759		if (copy_to_user(user_mask_ptr, mask, retlen))
3760			ret = -EFAULT;
3761		else
3762			ret = retlen;
3763	}
3764	free_cpumask_var(mask);
3765
3766	return ret;
3767}
3768
3769/**
3770 * sys_sched_yield - yield the current processor to other threads.
3771 *
3772 * This function yields the current CPU to other tasks. If there are no
3773 * other threads running on this CPU then this function will return.
3774 *
3775 * Return: 0.
3776 */
3777SYSCALL_DEFINE0(sched_yield)
3778{
3779	struct rq *rq = this_rq_lock();
3780
3781	schedstat_inc(rq, yld_count);
3782	current->sched_class->yield_task(rq);
3783
3784	/*
3785	 * Since we are going to call schedule() anyway, there's
3786	 * no need to preempt or enable interrupts:
3787	 */
3788	__release(rq->lock);
3789	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
3790	do_raw_spin_unlock(&rq->lock);
3791	sched_preempt_enable_no_resched();
3792
3793	schedule();
3794
3795	return 0;
3796}
3797
3798static inline int should_resched(void)
3799{
3800	return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
3801}
3802
3803static void __cond_resched(void)
3804{
3805	add_preempt_count(PREEMPT_ACTIVE);
3806	__schedule();
3807	sub_preempt_count(PREEMPT_ACTIVE);
3808}
3809
3810int __sched _cond_resched(void)
3811{
3812	if (should_resched()) {
3813		__cond_resched();
3814		return 1;
3815	}
3816	return 0;
3817}
3818EXPORT_SYMBOL(_cond_resched);
3819
3820/*
3821 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
3822 * call schedule, and on return reacquire the lock.
3823 *
3824 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
3825 * operations here to prevent schedule() from being called twice (once via
3826 * spin_unlock(), once by hand).
3827 */
3828int __cond_resched_lock(spinlock_t *lock)
3829{
3830	int resched = should_resched();
3831	int ret = 0;
3832
3833	lockdep_assert_held(lock);
3834
3835	if (spin_needbreak(lock) || resched) {
3836		spin_unlock(lock);
3837		if (resched)
3838			__cond_resched();
3839		else
3840			cpu_relax();
3841		ret = 1;
3842		spin_lock(lock);
3843	}
3844	return ret;
3845}
3846EXPORT_SYMBOL(__cond_resched_lock);
3847
3848int __sched __cond_resched_softirq(void)
3849{
3850	BUG_ON(!in_softirq());
3851
3852	if (should_resched()) {
3853		local_bh_enable();
3854		__cond_resched();
3855		local_bh_disable();
3856		return 1;
3857	}
3858	return 0;
3859}
3860EXPORT_SYMBOL(__cond_resched_softirq);
3861
3862/**
3863 * yield - yield the current processor to other threads.
3864 *
3865 * Do not ever use this function, there's a 99% chance you're doing it wrong.
3866 *
3867 * The scheduler is at all times free to pick the calling task as the most
3868 * eligible task to run, if removing the yield() call from your code breaks
3869 * it, its already broken.
3870 *
3871 * Typical broken usage is:
3872 *
3873 * while (!event)
3874 * 	yield();
3875 *
3876 * where one assumes that yield() will let 'the other' process run that will
3877 * make event true. If the current task is a SCHED_FIFO task that will never
3878 * happen. Never use yield() as a progress guarantee!!
3879 *
3880 * If you want to use yield() to wait for something, use wait_event().
3881 * If you want to use yield() to be 'nice' for others, use cond_resched().
3882 * If you still want to use yield(), do not!
3883 */
3884void __sched yield(void)
3885{
3886	set_current_state(TASK_RUNNING);
3887	sys_sched_yield();
3888}
3889EXPORT_SYMBOL(yield);
3890
3891/**
3892 * yield_to - yield the current processor to another thread in
3893 * your thread group, or accelerate that thread toward the
3894 * processor it's on.
3895 * @p: target task
3896 * @preempt: whether task preemption is allowed or not
3897 *
3898 * It's the caller's job to ensure that the target task struct
3899 * can't go away on us before we can do any checks.
3900 *
3901 * Return:
3902 *	true (>0) if we indeed boosted the target task.
3903 *	false (0) if we failed to boost the target.
3904 *	-ESRCH if there's no task to yield to.
3905 */
3906bool __sched yield_to(struct task_struct *p, bool preempt)
3907{
3908	struct task_struct *curr = current;
3909	struct rq *rq, *p_rq;
3910	unsigned long flags;
3911	int yielded = 0;
3912
3913	local_irq_save(flags);
3914	rq = this_rq();
3915
3916again:
3917	p_rq = task_rq(p);
3918	/*
3919	 * If we're the only runnable task on the rq and target rq also
3920	 * has only one task, there's absolutely no point in yielding.
3921	 */
3922	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
3923		yielded = -ESRCH;
3924		goto out_irq;
3925	}
3926
3927	double_rq_lock(rq, p_rq);
3928	while (task_rq(p) != p_rq) {
3929		double_rq_unlock(rq, p_rq);
3930		goto again;
3931	}
3932
3933	if (!curr->sched_class->yield_to_task)
3934		goto out_unlock;
3935
3936	if (curr->sched_class != p->sched_class)
3937		goto out_unlock;
3938
3939	if (task_running(p_rq, p) || p->state)
3940		goto out_unlock;
3941
3942	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
3943	if (yielded) {
3944		schedstat_inc(rq, yld_count);
3945		/*
3946		 * Make p's CPU reschedule; pick_next_entity takes care of
3947		 * fairness.
3948		 */
3949		if (preempt && rq != p_rq)
3950			resched_task(p_rq->curr);
3951	}
3952
3953out_unlock:
3954	double_rq_unlock(rq, p_rq);
3955out_irq:
3956	local_irq_restore(flags);
3957
3958	if (yielded > 0)
3959		schedule();
3960
3961	return yielded;
3962}
3963EXPORT_SYMBOL_GPL(yield_to);
3964
3965/*
3966 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
3967 * that process accounting knows that this is a task in IO wait state.
3968 */
3969void __sched io_schedule(void)
3970{
3971	struct rq *rq = raw_rq();
3972
3973	delayacct_blkio_start();
3974	atomic_inc(&rq->nr_iowait);
3975	blk_flush_plug(current);
3976	current->in_iowait = 1;
3977	schedule();
3978	current->in_iowait = 0;
3979	atomic_dec(&rq->nr_iowait);
3980	delayacct_blkio_end();
3981}
3982EXPORT_SYMBOL(io_schedule);
3983
3984long __sched io_schedule_timeout(long timeout)
3985{
3986	struct rq *rq = raw_rq();
3987	long ret;
3988
3989	delayacct_blkio_start();
3990	atomic_inc(&rq->nr_iowait);
3991	blk_flush_plug(current);
3992	current->in_iowait = 1;
3993	ret = schedule_timeout(timeout);
3994	current->in_iowait = 0;
3995	atomic_dec(&rq->nr_iowait);
3996	delayacct_blkio_end();
3997	return ret;
3998}
3999
4000/**
4001 * sys_sched_get_priority_max - return maximum RT priority.
4002 * @policy: scheduling class.
4003 *
4004 * Return: On success, this syscall returns the maximum
4005 * rt_priority that can be used by a given scheduling class.
4006 * On failure, a negative error code is returned.
4007 */
4008SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
4009{
4010	int ret = -EINVAL;
4011
4012	switch (policy) {
4013	case SCHED_FIFO:
4014	case SCHED_RR:
4015		ret = MAX_USER_RT_PRIO-1;
4016		break;
4017	case SCHED_NORMAL:
4018	case SCHED_BATCH:
4019	case SCHED_IDLE:
4020		ret = 0;
4021		break;
4022	}
4023	return ret;
4024}
4025
4026/**
4027 * sys_sched_get_priority_min - return minimum RT priority.
4028 * @policy: scheduling class.
4029 *
4030 * Return: On success, this syscall returns the minimum
4031 * rt_priority that can be used by a given scheduling class.
4032 * On failure, a negative error code is returned.
4033 */
4034SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
4035{
4036	int ret = -EINVAL;
4037
4038	switch (policy) {
4039	case SCHED_FIFO:
4040	case SCHED_RR:
4041		ret = 1;
4042		break;
4043	case SCHED_NORMAL:
4044	case SCHED_BATCH:
4045	case SCHED_IDLE:
4046		ret = 0;
4047	}
4048	return ret;
4049}
4050
4051/**
4052 * sys_sched_rr_get_interval - return the default timeslice of a process.
4053 * @pid: pid of the process.
4054 * @interval: userspace pointer to the timeslice value.
4055 *
4056 * this syscall writes the default timeslice value of a given process
4057 * into the user-space timespec buffer. A value of '0' means infinity.
4058 *
4059 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
4060 * an error code.
4061 */
4062SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
4063		struct timespec __user *, interval)
4064{
4065	struct task_struct *p;
4066	unsigned int time_slice;
4067	unsigned long flags;
4068	struct rq *rq;
4069	int retval;
4070	struct timespec t;
4071
4072	if (pid < 0)
4073		return -EINVAL;
4074
4075	retval = -ESRCH;
4076	rcu_read_lock();
4077	p = find_process_by_pid(pid);
4078	if (!p)
4079		goto out_unlock;
4080
4081	retval = security_task_getscheduler(p);
4082	if (retval)
4083		goto out_unlock;
4084
4085	rq = task_rq_lock(p, &flags);
4086	time_slice = p->sched_class->get_rr_interval(rq, p);
4087	task_rq_unlock(rq, p, &flags);
4088
4089	rcu_read_unlock();
4090	jiffies_to_timespec(time_slice, &t);
4091	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
4092	return retval;
4093
4094out_unlock:
4095	rcu_read_unlock();
4096	return retval;
4097}
4098
4099static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
4100
4101void sched_show_task(struct task_struct *p)
4102{
4103	unsigned long free = 0;
4104	int ppid;
4105	unsigned state;
4106
4107	state = p->state ? __ffs(p->state) + 1 : 0;
4108	printk(KERN_INFO "%-15.15s %c", p->comm,
4109		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4110#if BITS_PER_LONG == 32
4111	if (state == TASK_RUNNING)
4112		printk(KERN_CONT " running  ");
4113	else
4114		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
4115#else
4116	if (state == TASK_RUNNING)
4117		printk(KERN_CONT "  running task    ");
4118	else
4119		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
4120#endif
4121#ifdef CONFIG_DEBUG_STACK_USAGE
4122	free = stack_not_used(p);
4123#endif
4124	rcu_read_lock();
4125	ppid = task_pid_nr(rcu_dereference(p->real_parent));
4126	rcu_read_unlock();
4127	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
4128		task_pid_nr(p), ppid,
4129		(unsigned long)task_thread_info(p)->flags);
4130
4131	print_worker_info(KERN_INFO, p);
4132	show_stack(p, NULL);
4133}
4134
4135void show_state_filter(unsigned long state_filter)
4136{
4137	struct task_struct *g, *p;
4138
4139#if BITS_PER_LONG == 32
4140	printk(KERN_INFO
4141		"  task                PC stack   pid father\n");
4142#else
4143	printk(KERN_INFO
4144		"  task                        PC stack   pid father\n");
4145#endif
4146	rcu_read_lock();
4147	do_each_thread(g, p) {
4148		/*
4149		 * reset the NMI-timeout, listing all files on a slow
4150		 * console might take a lot of time:
4151		 */
4152		touch_nmi_watchdog();
4153		if (!state_filter || (p->state & state_filter))
4154			sched_show_task(p);
4155	} while_each_thread(g, p);
4156
4157	touch_all_softlockup_watchdogs();
4158
4159#ifdef CONFIG_SCHED_DEBUG
4160	sysrq_sched_debug_show();
4161#endif
4162	rcu_read_unlock();
4163	/*
4164	 * Only show locks if all tasks are dumped:
4165	 */
4166	if (!state_filter)
4167		debug_show_all_locks();
4168}
4169
4170void init_idle_bootup_task(struct task_struct *idle)
4171{
4172	idle->sched_class = &idle_sched_class;
4173}
4174
4175/**
4176 * init_idle - set up an idle thread for a given CPU
4177 * @idle: task in question
4178 * @cpu: cpu the idle task belongs to
4179 *
4180 * NOTE: this function does not set the idle thread's NEED_RESCHED
4181 * flag, to make booting more robust.
4182 */
4183void init_idle(struct task_struct *idle, int cpu)
4184{
4185	struct rq *rq = cpu_rq(cpu);
4186	unsigned long flags;
4187
4188	raw_spin_lock_irqsave(&rq->lock, flags);
4189
4190	__sched_fork(idle);
4191	idle->state = TASK_RUNNING;
4192	idle->se.exec_start = sched_clock();
4193
4194	do_set_cpus_allowed(idle, cpumask_of(cpu));
4195	/*
4196	 * We're having a chicken and egg problem, even though we are
4197	 * holding rq->lock, the cpu isn't yet set to this cpu so the
4198	 * lockdep check in task_group() will fail.
4199	 *
4200	 * Similar case to sched_fork(). / Alternatively we could
4201	 * use task_rq_lock() here and obtain the other rq->lock.
4202	 *
4203	 * Silence PROVE_RCU
4204	 */
4205	rcu_read_lock();
4206	__set_task_cpu(idle, cpu);
4207	rcu_read_unlock();
4208
4209	rq->curr = rq->idle = idle;
4210#if defined(CONFIG_SMP)
4211	idle->on_cpu = 1;
4212#endif
4213	raw_spin_unlock_irqrestore(&rq->lock, flags);
4214
4215	/* Set the preempt count _outside_ the spinlocks! */
4216	task_thread_info(idle)->preempt_count = 0;
4217
4218	/*
4219	 * The idle tasks have their own, simple scheduling class:
4220	 */
4221	idle->sched_class = &idle_sched_class;
4222	ftrace_graph_init_idle_task(idle, cpu);
4223	vtime_init_idle(idle, cpu);
4224#if defined(CONFIG_SMP)
4225	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
4226#endif
4227}
4228
4229#ifdef CONFIG_SMP
4230void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
4231{
4232	if (p->sched_class && p->sched_class->set_cpus_allowed)
4233		p->sched_class->set_cpus_allowed(p, new_mask);
4234
4235	cpumask_copy(&p->cpus_allowed, new_mask);
4236	p->nr_cpus_allowed = cpumask_weight(new_mask);
4237}
4238
4239/*
4240 * This is how migration works:
4241 *
4242 * 1) we invoke migration_cpu_stop() on the target CPU using
4243 *    stop_one_cpu().
4244 * 2) stopper starts to run (implicitly forcing the migrated thread
4245 *    off the CPU)
4246 * 3) it checks whether the migrated task is still in the wrong runqueue.
4247 * 4) if it's in the wrong runqueue then the migration thread removes
4248 *    it and puts it into the right queue.
4249 * 5) stopper completes and stop_one_cpu() returns and the migration
4250 *    is done.
4251 */
4252
4253/*
4254 * Change a given task's CPU affinity. Migrate the thread to a
4255 * proper CPU and schedule it away if the CPU it's executing on
4256 * is removed from the allowed bitmask.
4257 *
4258 * NOTE: the caller must have a valid reference to the task, the
4259 * task must not exit() & deallocate itself prematurely. The
4260 * call is not atomic; no spinlocks may be held.
4261 */
4262int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
4263{
4264	unsigned long flags;
4265	struct rq *rq;
4266	unsigned int dest_cpu;
4267	int ret = 0;
4268
4269	rq = task_rq_lock(p, &flags);
4270
4271	if (cpumask_equal(&p->cpus_allowed, new_mask))
4272		goto out;
4273
4274	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
4275		ret = -EINVAL;
4276		goto out;
4277	}
4278
4279	do_set_cpus_allowed(p, new_mask);
4280
4281	/* Can the task run on the task's current CPU? If so, we're done */
4282	if (cpumask_test_cpu(task_cpu(p), new_mask))
4283		goto out;
4284
4285	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
4286	if (p->on_rq) {
4287		struct migration_arg arg = { p, dest_cpu };
4288		/* Need help from migration thread: drop lock and wait. */
4289		task_rq_unlock(rq, p, &flags);
4290		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
4291		tlb_migrate_finish(p->mm);
4292		return 0;
4293	}
4294out:
4295	task_rq_unlock(rq, p, &flags);
4296
4297	return ret;
4298}
4299EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
4300
4301/*
4302 * Move (not current) task off this cpu, onto dest cpu. We're doing
4303 * this because either it can't run here any more (set_cpus_allowed()
4304 * away from this CPU, or CPU going down), or because we're
4305 * attempting to rebalance this task on exec (sched_exec).
4306 *
4307 * So we race with normal scheduler movements, but that's OK, as long
4308 * as the task is no longer on this CPU.
4309 *
4310 * Returns non-zero if task was successfully migrated.
4311 */
4312static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
4313{
4314	struct rq *rq_dest, *rq_src;
4315	int ret = 0;
4316
4317	if (unlikely(!cpu_active(dest_cpu)))
4318		return ret;
4319
4320	rq_src = cpu_rq(src_cpu);
4321	rq_dest = cpu_rq(dest_cpu);
4322
4323	raw_spin_lock(&p->pi_lock);
4324	double_rq_lock(rq_src, rq_dest);
4325	/* Already moved. */
4326	if (task_cpu(p) != src_cpu)
4327		goto done;
4328	/* Affinity changed (again). */
4329	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
4330		goto fail;
4331
4332	/*
4333	 * If we're not on a rq, the next wake-up will ensure we're
4334	 * placed properly.
4335	 */
4336	if (p->on_rq) {
4337		dequeue_task(rq_src, p, 0);
4338		set_task_cpu(p, dest_cpu);
4339		enqueue_task(rq_dest, p, 0);
4340		check_preempt_curr(rq_dest, p, 0);
4341	}
4342done:
4343	ret = 1;
4344fail:
4345	double_rq_unlock(rq_src, rq_dest);
4346	raw_spin_unlock(&p->pi_lock);
4347	return ret;
4348}
4349
4350/*
4351 * migration_cpu_stop - this will be executed by a highprio stopper thread
4352 * and performs thread migration by bumping thread off CPU then
4353 * 'pushing' onto another runqueue.
4354 */
4355static int migration_cpu_stop(void *data)
4356{
4357	struct migration_arg *arg = data;
4358
4359	/*
4360	 * The original target cpu might have gone down and we might
4361	 * be on another cpu but it doesn't matter.
4362	 */
4363	local_irq_disable();
4364	__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
4365	local_irq_enable();
4366	return 0;
4367}
4368
4369#ifdef CONFIG_HOTPLUG_CPU
4370
4371/*
4372 * Ensures that the idle task is using init_mm right before its cpu goes
4373 * offline.
4374 */
4375void idle_task_exit(void)
4376{
4377	struct mm_struct *mm = current->active_mm;
4378
4379	BUG_ON(cpu_online(smp_processor_id()));
4380
4381	if (mm != &init_mm)
4382		switch_mm(mm, &init_mm, current);
4383	mmdrop(mm);
4384}
4385
4386/*
4387 * Since this CPU is going 'away' for a while, fold any nr_active delta
4388 * we might have. Assumes we're called after migrate_tasks() so that the
4389 * nr_active count is stable.
4390 *
4391 * Also see the comment "Global load-average calculations".
4392 */
4393static void calc_load_migrate(struct rq *rq)
4394{
4395	long delta = calc_load_fold_active(rq);
4396	if (delta)
4397		atomic_long_add(delta, &calc_load_tasks);
4398}
4399
4400/*
4401 * Migrate all tasks from the rq, sleeping tasks will be migrated by
4402 * try_to_wake_up()->select_task_rq().
4403 *
4404 * Called with rq->lock held even though we'er in stop_machine() and
4405 * there's no concurrency possible, we hold the required locks anyway
4406 * because of lock validation efforts.
4407 */
4408static void migrate_tasks(unsigned int dead_cpu)
4409{
4410	struct rq *rq = cpu_rq(dead_cpu);
4411	struct task_struct *next, *stop = rq->stop;
4412	int dest_cpu;
4413
4414	/*
4415	 * Fudge the rq selection such that the below task selection loop
4416	 * doesn't get stuck on the currently eligible stop task.
4417	 *
4418	 * We're currently inside stop_machine() and the rq is either stuck
4419	 * in the stop_machine_cpu_stop() loop, or we're executing this code,
4420	 * either way we should never end up calling schedule() until we're
4421	 * done here.
4422	 */
4423	rq->stop = NULL;
4424
4425	/*
4426	 * put_prev_task() and pick_next_task() sched
4427	 * class method both need to have an up-to-date
4428	 * value of rq->clock[_task]
4429	 */
4430	update_rq_clock(rq);
4431
4432	for ( ; ; ) {
4433		/*
4434		 * There's this thread running, bail when that's the only
4435		 * remaining thread.
4436		 */
4437		if (rq->nr_running == 1)
4438			break;
4439
4440		next = pick_next_task(rq);
4441		BUG_ON(!next);
4442		next->sched_class->put_prev_task(rq, next);
4443
4444		/* Find suitable destination for @next, with force if needed. */
4445		dest_cpu = select_fallback_rq(dead_cpu, next);
4446		raw_spin_unlock(&rq->lock);
4447
4448		__migrate_task(next, dead_cpu, dest_cpu);
4449
4450		raw_spin_lock(&rq->lock);
4451	}
4452
4453	rq->stop = stop;
4454}
4455
4456#endif /* CONFIG_HOTPLUG_CPU */
4457
4458#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
4459
4460static struct ctl_table sd_ctl_dir[] = {
4461	{
4462		.procname	= "sched_domain",
4463		.mode		= 0555,
4464	},
4465	{}
4466};
4467
4468static struct ctl_table sd_ctl_root[] = {
4469	{
4470		.procname	= "kernel",
4471		.mode		= 0555,
4472		.child		= sd_ctl_dir,
4473	},
4474	{}
4475};
4476
4477static struct ctl_table *sd_alloc_ctl_entry(int n)
4478{
4479	struct ctl_table *entry =
4480		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
4481
4482	return entry;
4483}
4484
4485static void sd_free_ctl_entry(struct ctl_table **tablep)
4486{
4487	struct ctl_table *entry;
4488
4489	/*
4490	 * In the intermediate directories, both the child directory and
4491	 * procname are dynamically allocated and could fail but the mode
4492	 * will always be set. In the lowest directory the names are
4493	 * static strings and all have proc handlers.
4494	 */
4495	for (entry = *tablep; entry->mode; entry++) {
4496		if (entry->child)
4497			sd_free_ctl_entry(&entry->child);
4498		if (entry->proc_handler == NULL)
4499			kfree(entry->procname);
4500	}
4501
4502	kfree(*tablep);
4503	*tablep = NULL;
4504}
4505
4506static int min_load_idx = 0;
4507static int max_load_idx = CPU_LOAD_IDX_MAX-1;
4508
4509static void
4510set_table_entry(struct ctl_table *entry,
4511		const char *procname, void *data, int maxlen,
4512		umode_t mode, proc_handler *proc_handler,
4513		bool load_idx)
4514{
4515	entry->procname = procname;
4516	entry->data = data;
4517	entry->maxlen = maxlen;
4518	entry->mode = mode;
4519	entry->proc_handler = proc_handler;
4520
4521	if (load_idx) {
4522		entry->extra1 = &min_load_idx;
4523		entry->extra2 = &max_load_idx;
4524	}
4525}
4526
4527static struct ctl_table *
4528sd_alloc_ctl_domain_table(struct sched_domain *sd)
4529{
4530	struct ctl_table *table = sd_alloc_ctl_entry(13);
4531
4532	if (table == NULL)
4533		return NULL;
4534
4535	set_table_entry(&table[0], "min_interval", &sd->min_interval,
4536		sizeof(long), 0644, proc_doulongvec_minmax, false);
4537	set_table_entry(&table[1], "max_interval", &sd->max_interval,
4538		sizeof(long), 0644, proc_doulongvec_minmax, false);
4539	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
4540		sizeof(int), 0644, proc_dointvec_minmax, true);
4541	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
4542		sizeof(int), 0644, proc_dointvec_minmax, true);
4543	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
4544		sizeof(int), 0644, proc_dointvec_minmax, true);
4545	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
4546		sizeof(int), 0644, proc_dointvec_minmax, true);
4547	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
4548		sizeof(int), 0644, proc_dointvec_minmax, true);
4549	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
4550		sizeof(int), 0644, proc_dointvec_minmax, false);
4551	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
4552		sizeof(int), 0644, proc_dointvec_minmax, false);
4553	set_table_entry(&table[9], "cache_nice_tries",
4554		&sd->cache_nice_tries,
4555		sizeof(int), 0644, proc_dointvec_minmax, false);
4556	set_table_entry(&table[10], "flags", &sd->flags,
4557		sizeof(int), 0644, proc_dointvec_minmax, false);
4558	set_table_entry(&table[11], "name", sd->name,
4559		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
4560	/* &table[12] is terminator */
4561
4562	return table;
4563}
4564
4565static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
4566{
4567	struct ctl_table *entry, *table;
4568	struct sched_domain *sd;
4569	int domain_num = 0, i;
4570	char buf[32];
4571
4572	for_each_domain(cpu, sd)
4573		domain_num++;
4574	entry = table = sd_alloc_ctl_entry(domain_num + 1);
4575	if (table == NULL)
4576		return NULL;
4577
4578	i = 0;
4579	for_each_domain(cpu, sd) {
4580		snprintf(buf, 32, "domain%d", i);
4581		entry->procname = kstrdup(buf, GFP_KERNEL);
4582		entry->mode = 0555;
4583		entry->child = sd_alloc_ctl_domain_table(sd);
4584		entry++;
4585		i++;
4586	}
4587	return table;
4588}
4589
4590static struct ctl_table_header *sd_sysctl_header;
4591static void register_sched_domain_sysctl(void)
4592{
4593	int i, cpu_num = num_possible_cpus();
4594	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
4595	char buf[32];
4596
4597	WARN_ON(sd_ctl_dir[0].child);
4598	sd_ctl_dir[0].child = entry;
4599
4600	if (entry == NULL)
4601		return;
4602
4603	for_each_possible_cpu(i) {
4604		snprintf(buf, 32, "cpu%d", i);
4605		entry->procname = kstrdup(buf, GFP_KERNEL);
4606		entry->mode = 0555;
4607		entry->child = sd_alloc_ctl_cpu_table(i);
4608		entry++;
4609	}
4610
4611	WARN_ON(sd_sysctl_header);
4612	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
4613}
4614
4615/* may be called multiple times per register */
4616static void unregister_sched_domain_sysctl(void)
4617{
4618	if (sd_sysctl_header)
4619		unregister_sysctl_table(sd_sysctl_header);
4620	sd_sysctl_header = NULL;
4621	if (sd_ctl_dir[0].child)
4622		sd_free_ctl_entry(&sd_ctl_dir[0].child);
4623}
4624#else
4625static void register_sched_domain_sysctl(void)
4626{
4627}
4628static void unregister_sched_domain_sysctl(void)
4629{
4630}
4631#endif
4632
4633static void set_rq_online(struct rq *rq)
4634{
4635	if (!rq->online) {
4636		const struct sched_class *class;
4637
4638		cpumask_set_cpu(rq->cpu, rq->rd->online);
4639		rq->online = 1;
4640
4641		for_each_class(class) {
4642			if (class->rq_online)
4643				class->rq_online(rq);
4644		}
4645	}
4646}
4647
4648static void set_rq_offline(struct rq *rq)
4649{
4650	if (rq->online) {
4651		const struct sched_class *class;
4652
4653		for_each_class(class) {
4654			if (class->rq_offline)
4655				class->rq_offline(rq);
4656		}
4657
4658		cpumask_clear_cpu(rq->cpu, rq->rd->online);
4659		rq->online = 0;
4660	}
4661}
4662
4663/*
4664 * migration_call - callback that gets triggered when a CPU is added.
4665 * Here we can start up the necessary migration thread for the new CPU.
4666 */
4667static int
4668migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
4669{
4670	int cpu = (long)hcpu;
4671	unsigned long flags;
4672	struct rq *rq = cpu_rq(cpu);
4673
4674	switch (action & ~CPU_TASKS_FROZEN) {
4675
4676	case CPU_UP_PREPARE:
4677		rq->calc_load_update = calc_load_update;
4678		break;
4679
4680	case CPU_ONLINE:
4681		/* Update our root-domain */
4682		raw_spin_lock_irqsave(&rq->lock, flags);
4683		if (rq->rd) {
4684			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
4685
4686			set_rq_online(rq);
4687		}
4688		raw_spin_unlock_irqrestore(&rq->lock, flags);
4689		break;
4690
4691#ifdef CONFIG_HOTPLUG_CPU
4692	case CPU_DYING:
4693		sched_ttwu_pending();
4694		/* Update our root-domain */
4695		raw_spin_lock_irqsave(&rq->lock, flags);
4696		if (rq->rd) {
4697			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
4698			set_rq_offline(rq);
4699		}
4700		migrate_tasks(cpu);
4701		BUG_ON(rq->nr_running != 1); /* the migration thread */
4702		raw_spin_unlock_irqrestore(&rq->lock, flags);
4703		break;
4704
4705	case CPU_DEAD:
4706		calc_load_migrate(rq);
4707		break;
4708#endif
4709	}
4710
4711	update_max_interval();
4712
4713	return NOTIFY_OK;
4714}
4715
4716/*
4717 * Register at high priority so that task migration (migrate_all_tasks)
4718 * happens before everything else.  This has to be lower priority than
4719 * the notifier in the perf_event subsystem, though.
4720 */
4721static struct notifier_block migration_notifier = {
4722	.notifier_call = migration_call,
4723	.priority = CPU_PRI_MIGRATION,
4724};
4725
4726static int sched_cpu_active(struct notifier_block *nfb,
4727				      unsigned long action, void *hcpu)
4728{
4729	switch (action & ~CPU_TASKS_FROZEN) {
4730	case CPU_STARTING:
4731	case CPU_DOWN_FAILED:
4732		set_cpu_active((long)hcpu, true);
4733		return NOTIFY_OK;
4734	default:
4735		return NOTIFY_DONE;
4736	}
4737}
4738
4739static int sched_cpu_inactive(struct notifier_block *nfb,
4740					unsigned long action, void *hcpu)
4741{
4742	switch (action & ~CPU_TASKS_FROZEN) {
4743	case CPU_DOWN_PREPARE:
4744		set_cpu_active((long)hcpu, false);
4745		return NOTIFY_OK;
4746	default:
4747		return NOTIFY_DONE;
4748	}
4749}
4750
4751static int __init migration_init(void)
4752{
4753	void *cpu = (void *)(long)smp_processor_id();
4754	int err;
4755
4756	/* Initialize migration for the boot CPU */
4757	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
4758	BUG_ON(err == NOTIFY_BAD);
4759	migration_call(&migration_notifier, CPU_ONLINE, cpu);
4760	register_cpu_notifier(&migration_notifier);
4761
4762	/* Register cpu active notifiers */
4763	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
4764	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
4765
4766	return 0;
4767}
4768early_initcall(migration_init);
4769#endif
4770
4771#ifdef CONFIG_SMP
4772
4773static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
4774
4775#ifdef CONFIG_SCHED_DEBUG
4776
4777static __read_mostly int sched_debug_enabled;
4778
4779static int __init sched_debug_setup(char *str)
4780{
4781	sched_debug_enabled = 1;
4782
4783	return 0;
4784}
4785early_param("sched_debug", sched_debug_setup);
4786
4787static inline bool sched_debug(void)
4788{
4789	return sched_debug_enabled;
4790}
4791
4792static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
4793				  struct cpumask *groupmask)
4794{
4795	struct sched_group *group = sd->groups;
4796	char str[256];
4797
4798	cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
4799	cpumask_clear(groupmask);
4800
4801	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
4802
4803	if (!(sd->flags & SD_LOAD_BALANCE)) {
4804		printk("does not load-balance\n");
4805		if (sd->parent)
4806			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
4807					" has parent");
4808		return -1;
4809	}
4810
4811	printk(KERN_CONT "span %s level %s\n", str, sd->name);
4812
4813	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
4814		printk(KERN_ERR "ERROR: domain->span does not contain "
4815				"CPU%d\n", cpu);
4816	}
4817	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
4818		printk(KERN_ERR "ERROR: domain->groups does not contain"
4819				" CPU%d\n", cpu);
4820	}
4821
4822	printk(KERN_DEBUG "%*s groups:", level + 1, "");
4823	do {
4824		if (!group) {
4825			printk("\n");
4826			printk(KERN_ERR "ERROR: group is NULL\n");
4827			break;
4828		}
4829
4830		/*
4831		 * Even though we initialize ->power to something semi-sane,
4832		 * we leave power_orig unset. This allows us to detect if
4833		 * domain iteration is still funny without causing /0 traps.
4834		 */
4835		if (!group->sgp->power_orig) {
4836			printk(KERN_CONT "\n");
4837			printk(KERN_ERR "ERROR: domain->cpu_power not "
4838					"set\n");
4839			break;
4840		}
4841
4842		if (!cpumask_weight(sched_group_cpus(group))) {
4843			printk(KERN_CONT "\n");
4844			printk(KERN_ERR "ERROR: empty group\n");
4845			break;
4846		}
4847
4848		if (!(sd->flags & SD_OVERLAP) &&
4849		    cpumask_intersects(groupmask, sched_group_cpus(group))) {
4850			printk(KERN_CONT "\n");
4851			printk(KERN_ERR "ERROR: repeated CPUs\n");
4852			break;
4853		}
4854
4855		cpumask_or(groupmask, groupmask, sched_group_cpus(group));
4856
4857		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
4858
4859		printk(KERN_CONT " %s", str);
4860		if (group->sgp->power != SCHED_POWER_SCALE) {
4861			printk(KERN_CONT " (cpu_power = %d)",
4862				group->sgp->power);
4863		}
4864
4865		group = group->next;
4866	} while (group != sd->groups);
4867	printk(KERN_CONT "\n");
4868
4869	if (!cpumask_equal(sched_domain_span(sd), groupmask))
4870		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
4871
4872	if (sd->parent &&
4873	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
4874		printk(KERN_ERR "ERROR: parent span is not a superset "
4875			"of domain->span\n");
4876	return 0;
4877}
4878
4879static void sched_domain_debug(struct sched_domain *sd, int cpu)
4880{
4881	int level = 0;
4882
4883	if (!sched_debug_enabled)
4884		return;
4885
4886	if (!sd) {
4887		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
4888		return;
4889	}
4890
4891	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
4892
4893	for (;;) {
4894		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
4895			break;
4896		level++;
4897		sd = sd->parent;
4898		if (!sd)
4899			break;
4900	}
4901}
4902#else /* !CONFIG_SCHED_DEBUG */
4903# define sched_domain_debug(sd, cpu) do { } while (0)
4904static inline bool sched_debug(void)
4905{
4906	return false;
4907}
4908#endif /* CONFIG_SCHED_DEBUG */
4909
4910static int sd_degenerate(struct sched_domain *sd)
4911{
4912	if (cpumask_weight(sched_domain_span(sd)) == 1)
4913		return 1;
4914
4915	/* Following flags need at least 2 groups */
4916	if (sd->flags & (SD_LOAD_BALANCE |
4917			 SD_BALANCE_NEWIDLE |
4918			 SD_BALANCE_FORK |
4919			 SD_BALANCE_EXEC |
4920			 SD_SHARE_CPUPOWER |
4921			 SD_SHARE_PKG_RESOURCES)) {
4922		if (sd->groups != sd->groups->next)
4923			return 0;
4924	}
4925
4926	/* Following flags don't use groups */
4927	if (sd->flags & (SD_WAKE_AFFINE))
4928		return 0;
4929
4930	return 1;
4931}
4932
4933static int
4934sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
4935{
4936	unsigned long cflags = sd->flags, pflags = parent->flags;
4937
4938	if (sd_degenerate(parent))
4939		return 1;
4940
4941	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
4942		return 0;
4943
4944	/* Flags needing groups don't count if only 1 group in parent */
4945	if (parent->groups == parent->groups->next) {
4946		pflags &= ~(SD_LOAD_BALANCE |
4947				SD_BALANCE_NEWIDLE |
4948				SD_BALANCE_FORK |
4949				SD_BALANCE_EXEC |
4950				SD_SHARE_CPUPOWER |
4951				SD_SHARE_PKG_RESOURCES |
4952				SD_PREFER_SIBLING);
4953		if (nr_node_ids == 1)
4954			pflags &= ~SD_SERIALIZE;
4955	}
4956	if (~cflags & pflags)
4957		return 0;
4958
4959	return 1;
4960}
4961
4962static void free_rootdomain(struct rcu_head *rcu)
4963{
4964	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
4965
4966	cpupri_cleanup(&rd->cpupri);
4967	free_cpumask_var(rd->rto_mask);
4968	free_cpumask_var(rd->online);
4969	free_cpumask_var(rd->span);
4970	kfree(rd);
4971}
4972
4973static void rq_attach_root(struct rq *rq, struct root_domain *rd)
4974{
4975	struct root_domain *old_rd = NULL;
4976	unsigned long flags;
4977
4978	raw_spin_lock_irqsave(&rq->lock, flags);
4979
4980	if (rq->rd) {
4981		old_rd = rq->rd;
4982
4983		if (cpumask_test_cpu(rq->cpu, old_rd->online))
4984			set_rq_offline(rq);
4985
4986		cpumask_clear_cpu(rq->cpu, old_rd->span);
4987
4988		/*
4989		 * If we dont want to free the old_rt yet then
4990		 * set old_rd to NULL to skip the freeing later
4991		 * in this function:
4992		 */
4993		if (!atomic_dec_and_test(&old_rd->refcount))
4994			old_rd = NULL;
4995	}
4996
4997	atomic_inc(&rd->refcount);
4998	rq->rd = rd;
4999
5000	cpumask_set_cpu(rq->cpu, rd->span);
5001	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
5002		set_rq_online(rq);
5003
5004	raw_spin_unlock_irqrestore(&rq->lock, flags);
5005
5006	if (old_rd)
5007		call_rcu_sched(&old_rd->rcu, free_rootdomain);
5008}
5009
5010static int init_rootdomain(struct root_domain *rd)
5011{
5012	memset(rd, 0, sizeof(*rd));
5013
5014	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
5015		goto out;
5016	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
5017		goto free_span;
5018	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
5019		goto free_online;
5020
5021	if (cpupri_init(&rd->cpupri) != 0)
5022		goto free_rto_mask;
5023	return 0;
5024
5025free_rto_mask:
5026	free_cpumask_var(rd->rto_mask);
5027free_online:
5028	free_cpumask_var(rd->online);
5029free_span:
5030	free_cpumask_var(rd->span);
5031out:
5032	return -ENOMEM;
5033}
5034
5035/*
5036 * By default the system creates a single root-domain with all cpus as
5037 * members (mimicking the global state we have today).
5038 */
5039struct root_domain def_root_domain;
5040
5041static void init_defrootdomain(void)
5042{
5043	init_rootdomain(&def_root_domain);
5044
5045	atomic_set(&def_root_domain.refcount, 1);
5046}
5047
5048static struct root_domain *alloc_rootdomain(void)
5049{
5050	struct root_domain *rd;
5051
5052	rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5053	if (!rd)
5054		return NULL;
5055
5056	if (init_rootdomain(rd) != 0) {
5057		kfree(rd);
5058		return NULL;
5059	}
5060
5061	return rd;
5062}
5063
5064static void free_sched_groups(struct sched_group *sg, int free_sgp)
5065{
5066	struct sched_group *tmp, *first;
5067
5068	if (!sg)
5069		return;
5070
5071	first = sg;
5072	do {
5073		tmp = sg->next;
5074
5075		if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
5076			kfree(sg->sgp);
5077
5078		kfree(sg);
5079		sg = tmp;
5080	} while (sg != first);
5081}
5082
5083static void free_sched_domain(struct rcu_head *rcu)
5084{
5085	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
5086
5087	/*
5088	 * If its an overlapping domain it has private groups, iterate and
5089	 * nuke them all.
5090	 */
5091	if (sd->flags & SD_OVERLAP) {
5092		free_sched_groups(sd->groups, 1);
5093	} else if (atomic_dec_and_test(&sd->groups->ref)) {
5094		kfree(sd->groups->sgp);
5095		kfree(sd->groups);
5096	}
5097	kfree(sd);
5098}
5099
5100static void destroy_sched_domain(struct sched_domain *sd, int cpu)
5101{
5102	call_rcu(&sd->rcu, free_sched_domain);
5103}
5104
5105static void destroy_sched_domains(struct sched_domain *sd, int cpu)
5106{
5107	for (; sd; sd = sd->parent)
5108		destroy_sched_domain(sd, cpu);
5109}
5110
5111/*
5112 * Keep a special pointer to the highest sched_domain that has
5113 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
5114 * allows us to avoid some pointer chasing select_idle_sibling().
5115 *
5116 * Also keep a unique ID per domain (we use the first cpu number in
5117 * the cpumask of the domain), this allows us to quickly tell if
5118 * two cpus are in the same cache domain, see cpus_share_cache().
5119 */
5120DEFINE_PER_CPU(struct sched_domain *, sd_llc);
5121DEFINE_PER_CPU(int, sd_llc_size);
5122DEFINE_PER_CPU(int, sd_llc_id);
5123
5124static void update_top_cache_domain(int cpu)
5125{
5126	struct sched_domain *sd;
5127	int id = cpu;
5128	int size = 1;
5129
5130	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
5131	if (sd) {
5132		id = cpumask_first(sched_domain_span(sd));
5133		size = cpumask_weight(sched_domain_span(sd));
5134	}
5135
5136	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
5137	per_cpu(sd_llc_size, cpu) = size;
5138	per_cpu(sd_llc_id, cpu) = id;
5139}
5140
5141/*
5142 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
5143 * hold the hotplug lock.
5144 */
5145static void
5146cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
5147{
5148	struct rq *rq = cpu_rq(cpu);
5149	struct sched_domain *tmp;
5150
5151	/* Remove the sched domains which do not contribute to scheduling. */
5152	for (tmp = sd; tmp; ) {
5153		struct sched_domain *parent = tmp->parent;
5154		if (!parent)
5155			break;
5156
5157		if (sd_parent_degenerate(tmp, parent)) {
5158			tmp->parent = parent->parent;
5159			if (parent->parent)
5160				parent->parent->child = tmp;
5161			/*
5162			 * Transfer SD_PREFER_SIBLING down in case of a
5163			 * degenerate parent; the spans match for this
5164			 * so the property transfers.
5165			 */
5166			if (parent->flags & SD_PREFER_SIBLING)
5167				tmp->flags |= SD_PREFER_SIBLING;
5168			destroy_sched_domain(parent, cpu);
5169		} else
5170			tmp = tmp->parent;
5171	}
5172
5173	if (sd && sd_degenerate(sd)) {
5174		tmp = sd;
5175		sd = sd->parent;
5176		destroy_sched_domain(tmp, cpu);
5177		if (sd)
5178			sd->child = NULL;
5179	}
5180
5181	sched_domain_debug(sd, cpu);
5182
5183	rq_attach_root(rq, rd);
5184	tmp = rq->sd;
5185	rcu_assign_pointer(rq->sd, sd);
5186	destroy_sched_domains(tmp, cpu);
5187
5188	update_top_cache_domain(cpu);
5189}
5190
5191/* cpus with isolated domains */
5192static cpumask_var_t cpu_isolated_map;
5193
5194/* Setup the mask of cpus configured for isolated domains */
5195static int __init isolated_cpu_setup(char *str)
5196{
5197	alloc_bootmem_cpumask_var(&cpu_isolated_map);
5198	cpulist_parse(str, cpu_isolated_map);
5199	return 1;
5200}
5201
5202__setup("isolcpus=", isolated_cpu_setup);
5203
5204static const struct cpumask *cpu_cpu_mask(int cpu)
5205{
5206	return cpumask_of_node(cpu_to_node(cpu));
5207}
5208
5209struct sd_data {
5210	struct sched_domain **__percpu sd;
5211	struct sched_group **__percpu sg;
5212	struct sched_group_power **__percpu sgp;
5213};
5214
5215struct s_data {
5216	struct sched_domain ** __percpu sd;
5217	struct root_domain	*rd;
5218};
5219
5220enum s_alloc {
5221	sa_rootdomain,
5222	sa_sd,
5223	sa_sd_storage,
5224	sa_none,
5225};
5226
5227struct sched_domain_topology_level;
5228
5229typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
5230typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
5231
5232#define SDTL_OVERLAP	0x01
5233
5234struct sched_domain_topology_level {
5235	sched_domain_init_f init;
5236	sched_domain_mask_f mask;
5237	int		    flags;
5238	int		    numa_level;
5239	struct sd_data      data;
5240};
5241
5242/*
5243 * Build an iteration mask that can exclude certain CPUs from the upwards
5244 * domain traversal.
5245 *
5246 * Asymmetric node setups can result in situations where the domain tree is of
5247 * unequal depth, make sure to skip domains that already cover the entire
5248 * range.
5249 *
5250 * In that case build_sched_domains() will have terminated the iteration early
5251 * and our sibling sd spans will be empty. Domains should always include the
5252 * cpu they're built on, so check that.
5253 *
5254 */
5255static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
5256{
5257	const struct cpumask *span = sched_domain_span(sd);
5258	struct sd_data *sdd = sd->private;
5259	struct sched_domain *sibling;
5260	int i;
5261
5262	for_each_cpu(i, span) {
5263		sibling = *per_cpu_ptr(sdd->sd, i);
5264		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
5265			continue;
5266
5267		cpumask_set_cpu(i, sched_group_mask(sg));
5268	}
5269}
5270
5271/*
5272 * Return the canonical balance cpu for this group, this is the first cpu
5273 * of this group that's also in the iteration mask.
5274 */
5275int group_balance_cpu(struct sched_group *sg)
5276{
5277	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
5278}
5279
5280static int
5281build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5282{
5283	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
5284	const struct cpumask *span = sched_domain_span(sd);
5285	struct cpumask *covered = sched_domains_tmpmask;
5286	struct sd_data *sdd = sd->private;
5287	struct sched_domain *child;
5288	int i;
5289
5290	cpumask_clear(covered);
5291
5292	for_each_cpu(i, span) {
5293		struct cpumask *sg_span;
5294
5295		if (cpumask_test_cpu(i, covered))
5296			continue;
5297
5298		child = *per_cpu_ptr(sdd->sd, i);
5299
5300		/* See the comment near build_group_mask(). */
5301		if (!cpumask_test_cpu(i, sched_domain_span(child)))
5302			continue;
5303
5304		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
5305				GFP_KERNEL, cpu_to_node(cpu));
5306
5307		if (!sg)
5308			goto fail;
5309
5310		sg_span = sched_group_cpus(sg);
5311		if (child->child) {
5312			child = child->child;
5313			cpumask_copy(sg_span, sched_domain_span(child));
5314		} else
5315			cpumask_set_cpu(i, sg_span);
5316
5317		cpumask_or(covered, covered, sg_span);
5318
5319		sg->sgp = *per_cpu_ptr(sdd->sgp, i);
5320		if (atomic_inc_return(&sg->sgp->ref) == 1)
5321			build_group_mask(sd, sg);
5322
5323		/*
5324		 * Initialize sgp->power such that even if we mess up the
5325		 * domains and no possible iteration will get us here, we won't
5326		 * die on a /0 trap.
5327		 */
5328		sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
5329
5330		/*
5331		 * Make sure the first group of this domain contains the
5332		 * canonical balance cpu. Otherwise the sched_domain iteration
5333		 * breaks. See update_sg_lb_stats().
5334		 */
5335		if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
5336		    group_balance_cpu(sg) == cpu)
5337			groups = sg;
5338
5339		if (!first)
5340			first = sg;
5341		if (last)
5342			last->next = sg;
5343		last = sg;
5344		last->next = first;
5345	}
5346	sd->groups = groups;
5347
5348	return 0;
5349
5350fail:
5351	free_sched_groups(first, 0);
5352
5353	return -ENOMEM;
5354}
5355
5356static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
5357{
5358	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
5359	struct sched_domain *child = sd->child;
5360
5361	if (child)
5362		cpu = cpumask_first(sched_domain_span(child));
5363
5364	if (sg) {
5365		*sg = *per_cpu_ptr(sdd->sg, cpu);
5366		(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
5367		atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
5368	}
5369
5370	return cpu;
5371}
5372
5373/*
5374 * build_sched_groups will build a circular linked list of the groups
5375 * covered by the given span, and will set each group's ->cpumask correctly,
5376 * and ->cpu_power to 0.
5377 *
5378 * Assumes the sched_domain tree is fully constructed
5379 */
5380static int
5381build_sched_groups(struct sched_domain *sd, int cpu)
5382{
5383	struct sched_group *first = NULL, *last = NULL;
5384	struct sd_data *sdd = sd->private;
5385	const struct cpumask *span = sched_domain_span(sd);
5386	struct cpumask *covered;
5387	int i;
5388
5389	get_group(cpu, sdd, &sd->groups);
5390	atomic_inc(&sd->groups->ref);
5391
5392	if (cpu != cpumask_first(span))
5393		return 0;
5394
5395	lockdep_assert_held(&sched_domains_mutex);
5396	covered = sched_domains_tmpmask;
5397
5398	cpumask_clear(covered);
5399
5400	for_each_cpu(i, span) {
5401		struct sched_group *sg;
5402		int group, j;
5403
5404		if (cpumask_test_cpu(i, covered))
5405			continue;
5406
5407		group = get_group(i, sdd, &sg);
5408		cpumask_clear(sched_group_cpus(sg));
5409		sg->sgp->power = 0;
5410		cpumask_setall(sched_group_mask(sg));
5411
5412		for_each_cpu(j, span) {
5413			if (get_group(j, sdd, NULL) != group)
5414				continue;
5415
5416			cpumask_set_cpu(j, covered);
5417			cpumask_set_cpu(j, sched_group_cpus(sg));
5418		}
5419
5420		if (!first)
5421			first = sg;
5422		if (last)
5423			last->next = sg;
5424		last = sg;
5425	}
5426	last->next = first;
5427
5428	return 0;
5429}
5430
5431/*
5432 * Initialize sched groups cpu_power.
5433 *
5434 * cpu_power indicates the capacity of sched group, which is used while
5435 * distributing the load between different sched groups in a sched domain.
5436 * Typically cpu_power for all the groups in a sched domain will be same unless
5437 * there are asymmetries in the topology. If there are asymmetries, group
5438 * having more cpu_power will pickup more load compared to the group having
5439 * less cpu_power.
5440 */
5441static void init_sched_groups_power(int cpu, struct sched_domain *sd)
5442{
5443	struct sched_group *sg = sd->groups;
5444
5445	WARN_ON(!sg);
5446
5447	do {
5448		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
5449		sg = sg->next;
5450	} while (sg != sd->groups);
5451
5452	if (cpu != group_balance_cpu(sg))
5453		return;
5454
5455	update_group_power(sd, cpu);
5456	atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
5457}
5458
5459int __weak arch_sd_sibling_asym_packing(void)
5460{
5461       return 0*SD_ASYM_PACKING;
5462}
5463
5464/*
5465 * Initializers for schedule domains
5466 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
5467 */
5468
5469#ifdef CONFIG_SCHED_DEBUG
5470# define SD_INIT_NAME(sd, type)		sd->name = #type
5471#else
5472# define SD_INIT_NAME(sd, type)		do { } while (0)
5473#endif
5474
5475#define SD_INIT_FUNC(type)						\
5476static noinline struct sched_domain *					\
5477sd_init_##type(struct sched_domain_topology_level *tl, int cpu) 	\
5478{									\
5479	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);	\
5480	*sd = SD_##type##_INIT;						\
5481	SD_INIT_NAME(sd, type);						\
5482	sd->private = &tl->data;					\
5483	return sd;							\
5484}
5485
5486SD_INIT_FUNC(CPU)
5487#ifdef CONFIG_SCHED_SMT
5488 SD_INIT_FUNC(SIBLING)
5489#endif
5490#ifdef CONFIG_SCHED_MC
5491 SD_INIT_FUNC(MC)
5492#endif
5493#ifdef CONFIG_SCHED_BOOK
5494 SD_INIT_FUNC(BOOK)
5495#endif
5496
5497static int default_relax_domain_level = -1;
5498int sched_domain_level_max;
5499
5500static int __init setup_relax_domain_level(char *str)
5501{
5502	if (kstrtoint(str, 0, &default_relax_domain_level))
5503		pr_warn("Unable to set relax_domain_level\n");
5504
5505	return 1;
5506}
5507__setup("relax_domain_level=", setup_relax_domain_level);
5508
5509static void set_domain_attribute(struct sched_domain *sd,
5510				 struct sched_domain_attr *attr)
5511{
5512	int request;
5513
5514	if (!attr || attr->relax_domain_level < 0) {
5515		if (default_relax_domain_level < 0)
5516			return;
5517		else
5518			request = default_relax_domain_level;
5519	} else
5520		request = attr->relax_domain_level;
5521	if (request < sd->level) {
5522		/* turn off idle balance on this domain */
5523		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
5524	} else {
5525		/* turn on idle balance on this domain */
5526		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
5527	}
5528}
5529
5530static void __sdt_free(const struct cpumask *cpu_map);
5531static int __sdt_alloc(const struct cpumask *cpu_map);
5532
5533static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
5534				 const struct cpumask *cpu_map)
5535{
5536	switch (what) {
5537	case sa_rootdomain:
5538		if (!atomic_read(&d->rd->refcount))
5539			free_rootdomain(&d->rd->rcu); /* fall through */
5540	case sa_sd:
5541		free_percpu(d->sd); /* fall through */
5542	case sa_sd_storage:
5543		__sdt_free(cpu_map); /* fall through */
5544	case sa_none:
5545		break;
5546	}
5547}
5548
5549static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
5550						   const struct cpumask *cpu_map)
5551{
5552	memset(d, 0, sizeof(*d));
5553
5554	if (__sdt_alloc(cpu_map))
5555		return sa_sd_storage;
5556	d->sd = alloc_percpu(struct sched_domain *);
5557	if (!d->sd)
5558		return sa_sd_storage;
5559	d->rd = alloc_rootdomain();
5560	if (!d->rd)
5561		return sa_sd;
5562	return sa_rootdomain;
5563}
5564
5565/*
5566 * NULL the sd_data elements we've used to build the sched_domain and
5567 * sched_group structure so that the subsequent __free_domain_allocs()
5568 * will not free the data we're using.
5569 */
5570static void claim_allocations(int cpu, struct sched_domain *sd)
5571{
5572	struct sd_data *sdd = sd->private;
5573
5574	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
5575	*per_cpu_ptr(sdd->sd, cpu) = NULL;
5576
5577	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
5578		*per_cpu_ptr(sdd->sg, cpu) = NULL;
5579
5580	if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
5581		*per_cpu_ptr(sdd->sgp, cpu) = NULL;
5582}
5583
5584#ifdef CONFIG_SCHED_SMT
5585static const struct cpumask *cpu_smt_mask(int cpu)
5586{
5587	return topology_thread_cpumask(cpu);
5588}
5589#endif
5590
5591/*
5592 * Topology list, bottom-up.
5593 */
5594static struct sched_domain_topology_level default_topology[] = {
5595#ifdef CONFIG_SCHED_SMT
5596	{ sd_init_SIBLING, cpu_smt_mask, },
5597#endif
5598#ifdef CONFIG_SCHED_MC
5599	{ sd_init_MC, cpu_coregroup_mask, },
5600#endif
5601#ifdef CONFIG_SCHED_BOOK
5602	{ sd_init_BOOK, cpu_book_mask, },
5603#endif
5604	{ sd_init_CPU, cpu_cpu_mask, },
5605	{ NULL, },
5606};
5607
5608static struct sched_domain_topology_level *sched_domain_topology = default_topology;
5609
5610#define for_each_sd_topology(tl)			\
5611	for (tl = sched_domain_topology; tl->init; tl++)
5612
5613#ifdef CONFIG_NUMA
5614
5615static int sched_domains_numa_levels;
5616static int *sched_domains_numa_distance;
5617static struct cpumask ***sched_domains_numa_masks;
5618static int sched_domains_curr_level;
5619
5620static inline int sd_local_flags(int level)
5621{
5622	if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE)
5623		return 0;
5624
5625	return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
5626}
5627
5628static struct sched_domain *
5629sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
5630{
5631	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
5632	int level = tl->numa_level;
5633	int sd_weight = cpumask_weight(
5634			sched_domains_numa_masks[level][cpu_to_node(cpu)]);
5635
5636	*sd = (struct sched_domain){
5637		.min_interval		= sd_weight,
5638		.max_interval		= 2*sd_weight,
5639		.busy_factor		= 32,
5640		.imbalance_pct		= 125,
5641		.cache_nice_tries	= 2,
5642		.busy_idx		= 3,
5643		.idle_idx		= 2,
5644		.newidle_idx		= 0,
5645		.wake_idx		= 0,
5646		.forkexec_idx		= 0,
5647
5648		.flags			= 1*SD_LOAD_BALANCE
5649					| 1*SD_BALANCE_NEWIDLE
5650					| 0*SD_BALANCE_EXEC
5651					| 0*SD_BALANCE_FORK
5652					| 0*SD_BALANCE_WAKE
5653					| 0*SD_WAKE_AFFINE
5654					| 0*SD_SHARE_CPUPOWER
5655					| 0*SD_SHARE_PKG_RESOURCES
5656					| 1*SD_SERIALIZE
5657					| 0*SD_PREFER_SIBLING
5658					| sd_local_flags(level)
5659					,
5660		.last_balance		= jiffies,
5661		.balance_interval	= sd_weight,
5662	};
5663	SD_INIT_NAME(sd, NUMA);
5664	sd->private = &tl->data;
5665
5666	/*
5667	 * Ugly hack to pass state to sd_numa_mask()...
5668	 */
5669	sched_domains_curr_level = tl->numa_level;
5670
5671	return sd;
5672}
5673
5674static const struct cpumask *sd_numa_mask(int cpu)
5675{
5676	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
5677}
5678
5679static void sched_numa_warn(const char *str)
5680{
5681	static int done = false;
5682	int i,j;
5683
5684	if (done)
5685		return;
5686
5687	done = true;
5688
5689	printk(KERN_WARNING "ERROR: %s\n\n", str);
5690
5691	for (i = 0; i < nr_node_ids; i++) {
5692		printk(KERN_WARNING "  ");
5693		for (j = 0; j < nr_node_ids; j++)
5694			printk(KERN_CONT "%02d ", node_distance(i,j));
5695		printk(KERN_CONT "\n");
5696	}
5697	printk(KERN_WARNING "\n");
5698}
5699
5700static bool find_numa_distance(int distance)
5701{
5702	int i;
5703
5704	if (distance == node_distance(0, 0))
5705		return true;
5706
5707	for (i = 0; i < sched_domains_numa_levels; i++) {
5708		if (sched_domains_numa_distance[i] == distance)
5709			return true;
5710	}
5711
5712	return false;
5713}
5714
5715static void sched_init_numa(void)
5716{
5717	int next_distance, curr_distance = node_distance(0, 0);
5718	struct sched_domain_topology_level *tl;
5719	int level = 0;
5720	int i, j, k;
5721
5722	sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
5723	if (!sched_domains_numa_distance)
5724		return;
5725
5726	/*
5727	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
5728	 * unique distances in the node_distance() table.
5729	 *
5730	 * Assumes node_distance(0,j) includes all distances in
5731	 * node_distance(i,j) in order to avoid cubic time.
5732	 */
5733	next_distance = curr_distance;
5734	for (i = 0; i < nr_node_ids; i++) {
5735		for (j = 0; j < nr_node_ids; j++) {
5736			for (k = 0; k < nr_node_ids; k++) {
5737				int distance = node_distance(i, k);
5738
5739				if (distance > curr_distance &&
5740				    (distance < next_distance ||
5741				     next_distance == curr_distance))
5742					next_distance = distance;
5743
5744				/*
5745				 * While not a strong assumption it would be nice to know
5746				 * about cases where if node A is connected to B, B is not
5747				 * equally connected to A.
5748				 */
5749				if (sched_debug() && node_distance(k, i) != distance)
5750					sched_numa_warn("Node-distance not symmetric");
5751
5752				if (sched_debug() && i && !find_numa_distance(distance))
5753					sched_numa_warn("Node-0 not representative");
5754			}
5755			if (next_distance != curr_distance) {
5756				sched_domains_numa_distance[level++] = next_distance;
5757				sched_domains_numa_levels = level;
5758				curr_distance = next_distance;
5759			} else break;
5760		}
5761
5762		/*
5763		 * In case of sched_debug() we verify the above assumption.
5764		 */
5765		if (!sched_debug())
5766			break;
5767	}
5768	/*
5769	 * 'level' contains the number of unique distances, excluding the
5770	 * identity distance node_distance(i,i).
5771	 *
5772	 * The sched_domains_numa_distance[] array includes the actual distance
5773	 * numbers.
5774	 */
5775
5776	/*
5777	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
5778	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
5779	 * the array will contain less then 'level' members. This could be
5780	 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
5781	 * in other functions.
5782	 *
5783	 * We reset it to 'level' at the end of this function.
5784	 */
5785	sched_domains_numa_levels = 0;
5786
5787	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
5788	if (!sched_domains_numa_masks)
5789		return;
5790
5791	/*
5792	 * Now for each level, construct a mask per node which contains all
5793	 * cpus of nodes that are that many hops away from us.
5794	 */
5795	for (i = 0; i < level; i++) {
5796		sched_domains_numa_masks[i] =
5797			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
5798		if (!sched_domains_numa_masks[i])
5799			return;
5800
5801		for (j = 0; j < nr_node_ids; j++) {
5802			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
5803			if (!mask)
5804				return;
5805
5806			sched_domains_numa_masks[i][j] = mask;
5807
5808			for (k = 0; k < nr_node_ids; k++) {
5809				if (node_distance(j, k) > sched_domains_numa_distance[i])
5810					continue;
5811
5812				cpumask_or(mask, mask, cpumask_of_node(k));
5813			}
5814		}
5815	}
5816
5817	tl = kzalloc((ARRAY_SIZE(default_topology) + level) *
5818			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
5819	if (!tl)
5820		return;
5821
5822	/*
5823	 * Copy the default topology bits..
5824	 */
5825	for (i = 0; default_topology[i].init; i++)
5826		tl[i] = default_topology[i];
5827
5828	/*
5829	 * .. and append 'j' levels of NUMA goodness.
5830	 */
5831	for (j = 0; j < level; i++, j++) {
5832		tl[i] = (struct sched_domain_topology_level){
5833			.init = sd_numa_init,
5834			.mask = sd_numa_mask,
5835			.flags = SDTL_OVERLAP,
5836			.numa_level = j,
5837		};
5838	}
5839
5840	sched_domain_topology = tl;
5841
5842	sched_domains_numa_levels = level;
5843}
5844
5845static void sched_domains_numa_masks_set(int cpu)
5846{
5847	int i, j;
5848	int node = cpu_to_node(cpu);
5849
5850	for (i = 0; i < sched_domains_numa_levels; i++) {
5851		for (j = 0; j < nr_node_ids; j++) {
5852			if (node_distance(j, node) <= sched_domains_numa_distance[i])
5853				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
5854		}
5855	}
5856}
5857
5858static void sched_domains_numa_masks_clear(int cpu)
5859{
5860	int i, j;
5861	for (i = 0; i < sched_domains_numa_levels; i++) {
5862		for (j = 0; j < nr_node_ids; j++)
5863			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
5864	}
5865}
5866
5867/*
5868 * Update sched_domains_numa_masks[level][node] array when new cpus
5869 * are onlined.
5870 */
5871static int sched_domains_numa_masks_update(struct notifier_block *nfb,
5872					   unsigned long action,
5873					   void *hcpu)
5874{
5875	int cpu = (long)hcpu;
5876
5877	switch (action & ~CPU_TASKS_FROZEN) {
5878	case CPU_ONLINE:
5879		sched_domains_numa_masks_set(cpu);
5880		break;
5881
5882	case CPU_DEAD:
5883		sched_domains_numa_masks_clear(cpu);
5884		break;
5885
5886	default:
5887		return NOTIFY_DONE;
5888	}
5889
5890	return NOTIFY_OK;
5891}
5892#else
5893static inline void sched_init_numa(void)
5894{
5895}
5896
5897static int sched_domains_numa_masks_update(struct notifier_block *nfb,
5898					   unsigned long action,
5899					   void *hcpu)
5900{
5901	return 0;
5902}
5903#endif /* CONFIG_NUMA */
5904
5905static int __sdt_alloc(const struct cpumask *cpu_map)
5906{
5907	struct sched_domain_topology_level *tl;
5908	int j;
5909
5910	for_each_sd_topology(tl) {
5911		struct sd_data *sdd = &tl->data;
5912
5913		sdd->sd = alloc_percpu(struct sched_domain *);
5914		if (!sdd->sd)
5915			return -ENOMEM;
5916
5917		sdd->sg = alloc_percpu(struct sched_group *);
5918		if (!sdd->sg)
5919			return -ENOMEM;
5920
5921		sdd->sgp = alloc_percpu(struct sched_group_power *);
5922		if (!sdd->sgp)
5923			return -ENOMEM;
5924
5925		for_each_cpu(j, cpu_map) {
5926			struct sched_domain *sd;
5927			struct sched_group *sg;
5928			struct sched_group_power *sgp;
5929
5930		       	sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
5931					GFP_KERNEL, cpu_to_node(j));
5932			if (!sd)
5933				return -ENOMEM;
5934
5935			*per_cpu_ptr(sdd->sd, j) = sd;
5936
5937			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
5938					GFP_KERNEL, cpu_to_node(j));
5939			if (!sg)
5940				return -ENOMEM;
5941
5942			sg->next = sg;
5943
5944			*per_cpu_ptr(sdd->sg, j) = sg;
5945
5946			sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
5947					GFP_KERNEL, cpu_to_node(j));
5948			if (!sgp)
5949				return -ENOMEM;
5950
5951			*per_cpu_ptr(sdd->sgp, j) = sgp;
5952		}
5953	}
5954
5955	return 0;
5956}
5957
5958static void __sdt_free(const struct cpumask *cpu_map)
5959{
5960	struct sched_domain_topology_level *tl;
5961	int j;
5962
5963	for_each_sd_topology(tl) {
5964		struct sd_data *sdd = &tl->data;
5965
5966		for_each_cpu(j, cpu_map) {
5967			struct sched_domain *sd;
5968
5969			if (sdd->sd) {
5970				sd = *per_cpu_ptr(sdd->sd, j);
5971				if (sd && (sd->flags & SD_OVERLAP))
5972					free_sched_groups(sd->groups, 0);
5973				kfree(*per_cpu_ptr(sdd->sd, j));
5974			}
5975
5976			if (sdd->sg)
5977				kfree(*per_cpu_ptr(sdd->sg, j));
5978			if (sdd->sgp)
5979				kfree(*per_cpu_ptr(sdd->sgp, j));
5980		}
5981		free_percpu(sdd->sd);
5982		sdd->sd = NULL;
5983		free_percpu(sdd->sg);
5984		sdd->sg = NULL;
5985		free_percpu(sdd->sgp);
5986		sdd->sgp = NULL;
5987	}
5988}
5989
5990struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
5991		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
5992		struct sched_domain *child, int cpu)
5993{
5994	struct sched_domain *sd = tl->init(tl, cpu);
5995	if (!sd)
5996		return child;
5997
5998	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
5999	if (child) {
6000		sd->level = child->level + 1;
6001		sched_domain_level_max = max(sched_domain_level_max, sd->level);
6002		child->parent = sd;
6003		sd->child = child;
6004	}
6005	set_domain_attribute(sd, attr);
6006
6007	return sd;
6008}
6009
6010/*
6011 * Build sched domains for a given set of cpus and attach the sched domains
6012 * to the individual cpus
6013 */
6014static int build_sched_domains(const struct cpumask *cpu_map,
6015			       struct sched_domain_attr *attr)
6016{
6017	enum s_alloc alloc_state;
6018	struct sched_domain *sd;
6019	struct s_data d;
6020	int i, ret = -ENOMEM;
6021
6022	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
6023	if (alloc_state != sa_rootdomain)
6024		goto error;
6025
6026	/* Set up domains for cpus specified by the cpu_map. */
6027	for_each_cpu(i, cpu_map) {
6028		struct sched_domain_topology_level *tl;
6029
6030		sd = NULL;
6031		for_each_sd_topology(tl) {
6032			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
6033			if (tl == sched_domain_topology)
6034				*per_cpu_ptr(d.sd, i) = sd;
6035			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
6036				sd->flags |= SD_OVERLAP;
6037			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
6038				break;
6039		}
6040	}
6041
6042	/* Build the groups for the domains */
6043	for_each_cpu(i, cpu_map) {
6044		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6045			sd->span_weight = cpumask_weight(sched_domain_span(sd));
6046			if (sd->flags & SD_OVERLAP) {
6047				if (build_overlap_sched_groups(sd, i))
6048					goto error;
6049			} else {
6050				if (build_sched_groups(sd, i))
6051					goto error;
6052			}
6053		}
6054	}
6055
6056	/* Calculate CPU power for physical packages and nodes */
6057	for (i = nr_cpumask_bits-1; i >= 0; i--) {
6058		if (!cpumask_test_cpu(i, cpu_map))
6059			continue;
6060
6061		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6062			claim_allocations(i, sd);
6063			init_sched_groups_power(i, sd);
6064		}
6065	}
6066
6067	/* Attach the domains */
6068	rcu_read_lock();
6069	for_each_cpu(i, cpu_map) {
6070		sd = *per_cpu_ptr(d.sd, i);
6071		cpu_attach_domain(sd, d.rd, i);
6072	}
6073	rcu_read_unlock();
6074
6075	ret = 0;
6076error:
6077	__free_domain_allocs(&d, alloc_state, cpu_map);
6078	return ret;
6079}
6080
6081static cpumask_var_t *doms_cur;	/* current sched domains */
6082static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
6083static struct sched_domain_attr *dattr_cur;
6084				/* attribues of custom domains in 'doms_cur' */
6085
6086/*
6087 * Special case: If a kmalloc of a doms_cur partition (array of
6088 * cpumask) fails, then fallback to a single sched domain,
6089 * as determined by the single cpumask fallback_doms.
6090 */
6091static cpumask_var_t fallback_doms;
6092
6093/*
6094 * arch_update_cpu_topology lets virtualized architectures update the
6095 * cpu core maps. It is supposed to return 1 if the topology changed
6096 * or 0 if it stayed the same.
6097 */
6098int __attribute__((weak)) arch_update_cpu_topology(void)
6099{
6100	return 0;
6101}
6102
6103cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
6104{
6105	int i;
6106	cpumask_var_t *doms;
6107
6108	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
6109	if (!doms)
6110		return NULL;
6111	for (i = 0; i < ndoms; i++) {
6112		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
6113			free_sched_domains(doms, i);
6114			return NULL;
6115		}
6116	}
6117	return doms;
6118}
6119
6120void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
6121{
6122	unsigned int i;
6123	for (i = 0; i < ndoms; i++)
6124		free_cpumask_var(doms[i]);
6125	kfree(doms);
6126}
6127
6128/*
6129 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
6130 * For now this just excludes isolated cpus, but could be used to
6131 * exclude other special cases in the future.
6132 */
6133static int init_sched_domains(const struct cpumask *cpu_map)
6134{
6135	int err;
6136
6137	arch_update_cpu_topology();
6138	ndoms_cur = 1;
6139	doms_cur = alloc_sched_domains(ndoms_cur);
6140	if (!doms_cur)
6141		doms_cur = &fallback_doms;
6142	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
6143	err = build_sched_domains(doms_cur[0], NULL);
6144	register_sched_domain_sysctl();
6145
6146	return err;
6147}
6148
6149/*
6150 * Detach sched domains from a group of cpus specified in cpu_map
6151 * These cpus will now be attached to the NULL domain
6152 */
6153static void detach_destroy_domains(const struct cpumask *cpu_map)
6154{
6155	int i;
6156
6157	rcu_read_lock();
6158	for_each_cpu(i, cpu_map)
6159		cpu_attach_domain(NULL, &def_root_domain, i);
6160	rcu_read_unlock();
6161}
6162
6163/* handle null as "default" */
6164static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
6165			struct sched_domain_attr *new, int idx_new)
6166{
6167	struct sched_domain_attr tmp;
6168
6169	/* fast path */
6170	if (!new && !cur)
6171		return 1;
6172
6173	tmp = SD_ATTR_INIT;
6174	return !memcmp(cur ? (cur + idx_cur) : &tmp,
6175			new ? (new + idx_new) : &tmp,
6176			sizeof(struct sched_domain_attr));
6177}
6178
6179/*
6180 * Partition sched domains as specified by the 'ndoms_new'
6181 * cpumasks in the array doms_new[] of cpumasks. This compares
6182 * doms_new[] to the current sched domain partitioning, doms_cur[].
6183 * It destroys each deleted domain and builds each new domain.
6184 *
6185 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
6186 * The masks don't intersect (don't overlap.) We should setup one
6187 * sched domain for each mask. CPUs not in any of the cpumasks will
6188 * not be load balanced. If the same cpumask appears both in the
6189 * current 'doms_cur' domains and in the new 'doms_new', we can leave
6190 * it as it is.
6191 *
6192 * The passed in 'doms_new' should be allocated using
6193 * alloc_sched_domains.  This routine takes ownership of it and will
6194 * free_sched_domains it when done with it. If the caller failed the
6195 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
6196 * and partition_sched_domains() will fallback to the single partition
6197 * 'fallback_doms', it also forces the domains to be rebuilt.
6198 *
6199 * If doms_new == NULL it will be replaced with cpu_online_mask.
6200 * ndoms_new == 0 is a special case for destroying existing domains,
6201 * and it will not create the default domain.
6202 *
6203 * Call with hotplug lock held
6204 */
6205void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
6206			     struct sched_domain_attr *dattr_new)
6207{
6208	int i, j, n;
6209	int new_topology;
6210
6211	mutex_lock(&sched_domains_mutex);
6212
6213	/* always unregister in case we don't destroy any domains */
6214	unregister_sched_domain_sysctl();
6215
6216	/* Let architecture update cpu core mappings. */
6217	new_topology = arch_update_cpu_topology();
6218
6219	n = doms_new ? ndoms_new : 0;
6220
6221	/* Destroy deleted domains */
6222	for (i = 0; i < ndoms_cur; i++) {
6223		for (j = 0; j < n && !new_topology; j++) {
6224			if (cpumask_equal(doms_cur[i], doms_new[j])
6225			    && dattrs_equal(dattr_cur, i, dattr_new, j))
6226				goto match1;
6227		}
6228		/* no match - a current sched domain not in new doms_new[] */
6229		detach_destroy_domains(doms_cur[i]);
6230match1:
6231		;
6232	}
6233
6234	n = ndoms_cur;
6235	if (doms_new == NULL) {
6236		n = 0;
6237		doms_new = &fallback_doms;
6238		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
6239		WARN_ON_ONCE(dattr_new);
6240	}
6241
6242	/* Build new domains */
6243	for (i = 0; i < ndoms_new; i++) {
6244		for (j = 0; j < n && !new_topology; j++) {
6245			if (cpumask_equal(doms_new[i], doms_cur[j])
6246			    && dattrs_equal(dattr_new, i, dattr_cur, j))
6247				goto match2;
6248		}
6249		/* no match - add a new doms_new */
6250		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
6251match2:
6252		;
6253	}
6254
6255	/* Remember the new sched domains */
6256	if (doms_cur != &fallback_doms)
6257		free_sched_domains(doms_cur, ndoms_cur);
6258	kfree(dattr_cur);	/* kfree(NULL) is safe */
6259	doms_cur = doms_new;
6260	dattr_cur = dattr_new;
6261	ndoms_cur = ndoms_new;
6262
6263	register_sched_domain_sysctl();
6264
6265	mutex_unlock(&sched_domains_mutex);
6266}
6267
6268static int num_cpus_frozen;	/* used to mark begin/end of suspend/resume */
6269
6270/*
6271 * Update cpusets according to cpu_active mask.  If cpusets are
6272 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
6273 * around partition_sched_domains().
6274 *
6275 * If we come here as part of a suspend/resume, don't touch cpusets because we
6276 * want to restore it back to its original state upon resume anyway.
6277 */
6278static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
6279			     void *hcpu)
6280{
6281	switch (action) {
6282	case CPU_ONLINE_FROZEN:
6283	case CPU_DOWN_FAILED_FROZEN:
6284
6285		/*
6286		 * num_cpus_frozen tracks how many CPUs are involved in suspend
6287		 * resume sequence. As long as this is not the last online
6288		 * operation in the resume sequence, just build a single sched
6289		 * domain, ignoring cpusets.
6290		 */
6291		num_cpus_frozen--;
6292		if (likely(num_cpus_frozen)) {
6293			partition_sched_domains(1, NULL, NULL);
6294			break;
6295		}
6296
6297		/*
6298		 * This is the last CPU online operation. So fall through and
6299		 * restore the original sched domains by considering the
6300		 * cpuset configurations.
6301		 */
6302
6303	case CPU_ONLINE:
6304	case CPU_DOWN_FAILED:
6305		cpuset_update_active_cpus(true);
6306		break;
6307	default:
6308		return NOTIFY_DONE;
6309	}
6310	return NOTIFY_OK;
6311}
6312
6313static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
6314			       void *hcpu)
6315{
6316	switch (action) {
6317	case CPU_DOWN_PREPARE:
6318		cpuset_update_active_cpus(false);
6319		break;
6320	case CPU_DOWN_PREPARE_FROZEN:
6321		num_cpus_frozen++;
6322		partition_sched_domains(1, NULL, NULL);
6323		break;
6324	default:
6325		return NOTIFY_DONE;
6326	}
6327	return NOTIFY_OK;
6328}
6329
6330void __init sched_init_smp(void)
6331{
6332	cpumask_var_t non_isolated_cpus;
6333
6334	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
6335	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
6336
6337	sched_init_numa();
6338
6339	get_online_cpus();
6340	mutex_lock(&sched_domains_mutex);
6341	init_sched_domains(cpu_active_mask);
6342	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
6343	if (cpumask_empty(non_isolated_cpus))
6344		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
6345	mutex_unlock(&sched_domains_mutex);
6346	put_online_cpus();
6347
6348	hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
6349	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
6350	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
6351
6352	init_hrtick();
6353
6354	/* Move init over to a non-isolated CPU */
6355	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
6356		BUG();
6357	sched_init_granularity();
6358	free_cpumask_var(non_isolated_cpus);
6359
6360	init_sched_rt_class();
6361}
6362#else
6363void __init sched_init_smp(void)
6364{
6365	sched_init_granularity();
6366}
6367#endif /* CONFIG_SMP */
6368
6369const_debug unsigned int sysctl_timer_migration = 1;
6370
6371int in_sched_functions(unsigned long addr)
6372{
6373	return in_lock_functions(addr) ||
6374		(addr >= (unsigned long)__sched_text_start
6375		&& addr < (unsigned long)__sched_text_end);
6376}
6377
6378#ifdef CONFIG_CGROUP_SCHED
6379/*
6380 * Default task group.
6381 * Every task in system belongs to this group at bootup.
6382 */
6383struct task_group root_task_group;
6384LIST_HEAD(task_groups);
6385#endif
6386
6387DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
6388
6389void __init sched_init(void)
6390{
6391	int i, j;
6392	unsigned long alloc_size = 0, ptr;
6393
6394#ifdef CONFIG_FAIR_GROUP_SCHED
6395	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6396#endif
6397#ifdef CONFIG_RT_GROUP_SCHED
6398	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6399#endif
6400#ifdef CONFIG_CPUMASK_OFFSTACK
6401	alloc_size += num_possible_cpus() * cpumask_size();
6402#endif
6403	if (alloc_size) {
6404		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
6405
6406#ifdef CONFIG_FAIR_GROUP_SCHED
6407		root_task_group.se = (struct sched_entity **)ptr;
6408		ptr += nr_cpu_ids * sizeof(void **);
6409
6410		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
6411		ptr += nr_cpu_ids * sizeof(void **);
6412
6413#endif /* CONFIG_FAIR_GROUP_SCHED */
6414#ifdef CONFIG_RT_GROUP_SCHED
6415		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
6416		ptr += nr_cpu_ids * sizeof(void **);
6417
6418		root_task_group.rt_rq = (struct rt_rq **)ptr;
6419		ptr += nr_cpu_ids * sizeof(void **);
6420
6421#endif /* CONFIG_RT_GROUP_SCHED */
6422#ifdef CONFIG_CPUMASK_OFFSTACK
6423		for_each_possible_cpu(i) {
6424			per_cpu(load_balance_mask, i) = (void *)ptr;
6425			ptr += cpumask_size();
6426		}
6427#endif /* CONFIG_CPUMASK_OFFSTACK */
6428	}
6429
6430#ifdef CONFIG_SMP
6431	init_defrootdomain();
6432#endif
6433
6434	init_rt_bandwidth(&def_rt_bandwidth,
6435			global_rt_period(), global_rt_runtime());
6436
6437#ifdef CONFIG_RT_GROUP_SCHED
6438	init_rt_bandwidth(&root_task_group.rt_bandwidth,
6439			global_rt_period(), global_rt_runtime());
6440#endif /* CONFIG_RT_GROUP_SCHED */
6441
6442#ifdef CONFIG_CGROUP_SCHED
6443	list_add(&root_task_group.list, &task_groups);
6444	INIT_LIST_HEAD(&root_task_group.children);
6445	INIT_LIST_HEAD(&root_task_group.siblings);
6446	autogroup_init(&init_task);
6447
6448#endif /* CONFIG_CGROUP_SCHED */
6449
6450	for_each_possible_cpu(i) {
6451		struct rq *rq;
6452
6453		rq = cpu_rq(i);
6454		raw_spin_lock_init(&rq->lock);
6455		rq->nr_running = 0;
6456		rq->calc_load_active = 0;
6457		rq->calc_load_update = jiffies + LOAD_FREQ;
6458		init_cfs_rq(&rq->cfs);
6459		init_rt_rq(&rq->rt, rq);
6460#ifdef CONFIG_FAIR_GROUP_SCHED
6461		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
6462		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
6463		/*
6464		 * How much cpu bandwidth does root_task_group get?
6465		 *
6466		 * In case of task-groups formed thr' the cgroup filesystem, it
6467		 * gets 100% of the cpu resources in the system. This overall
6468		 * system cpu resource is divided among the tasks of
6469		 * root_task_group and its child task-groups in a fair manner,
6470		 * based on each entity's (task or task-group's) weight
6471		 * (se->load.weight).
6472		 *
6473		 * In other words, if root_task_group has 10 tasks of weight
6474		 * 1024) and two child groups A0 and A1 (of weight 1024 each),
6475		 * then A0's share of the cpu resource is:
6476		 *
6477		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
6478		 *
6479		 * We achieve this by letting root_task_group's tasks sit
6480		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
6481		 */
6482		init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
6483		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
6484#endif /* CONFIG_FAIR_GROUP_SCHED */
6485
6486		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
6487#ifdef CONFIG_RT_GROUP_SCHED
6488		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
6489		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
6490#endif
6491
6492		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
6493			rq->cpu_load[j] = 0;
6494
6495		rq->last_load_update_tick = jiffies;
6496
6497#ifdef CONFIG_SMP
6498		rq->sd = NULL;
6499		rq->rd = NULL;
6500		rq->cpu_power = SCHED_POWER_SCALE;
6501		rq->post_schedule = 0;
6502		rq->active_balance = 0;
6503		rq->next_balance = jiffies;
6504		rq->push_cpu = 0;
6505		rq->cpu = i;
6506		rq->online = 0;
6507		rq->idle_stamp = 0;
6508		rq->avg_idle = 2*sysctl_sched_migration_cost;
6509		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
6510
6511		INIT_LIST_HEAD(&rq->cfs_tasks);
6512
6513		rq_attach_root(rq, &def_root_domain);
6514#ifdef CONFIG_NO_HZ_COMMON
6515		rq->nohz_flags = 0;
6516#endif
6517#ifdef CONFIG_NO_HZ_FULL
6518		rq->last_sched_tick = 0;
6519#endif
6520#endif
6521		init_rq_hrtick(rq);
6522		atomic_set(&rq->nr_iowait, 0);
6523	}
6524
6525	set_load_weight(&init_task);
6526
6527#ifdef CONFIG_PREEMPT_NOTIFIERS
6528	INIT_HLIST_HEAD(&init_task.preempt_notifiers);
6529#endif
6530
6531#ifdef CONFIG_RT_MUTEXES
6532	plist_head_init(&init_task.pi_waiters);
6533#endif
6534
6535	/*
6536	 * The boot idle thread does lazy MMU switching as well:
6537	 */
6538	atomic_inc(&init_mm.mm_count);
6539	enter_lazy_tlb(&init_mm, current);
6540
6541	/*
6542	 * Make us the idle thread. Technically, schedule() should not be
6543	 * called from this thread, however somewhere below it might be,
6544	 * but because we are the idle thread, we just pick up running again
6545	 * when this runqueue becomes "idle".
6546	 */
6547	init_idle(current, smp_processor_id());
6548
6549	calc_load_update = jiffies + LOAD_FREQ;
6550
6551	/*
6552	 * During early bootup we pretend to be a normal task:
6553	 */
6554	current->sched_class = &fair_sched_class;
6555
6556#ifdef CONFIG_SMP
6557	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
6558	/* May be allocated at isolcpus cmdline parse time */
6559	if (cpu_isolated_map == NULL)
6560		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
6561	idle_thread_set_boot_cpu();
6562#endif
6563	init_sched_fair_class();
6564
6565	scheduler_running = 1;
6566}
6567
6568#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
6569static inline int preempt_count_equals(int preempt_offset)
6570{
6571	int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
6572
6573	return (nested == preempt_offset);
6574}
6575
6576void __might_sleep(const char *file, int line, int preempt_offset)
6577{
6578	static unsigned long prev_jiffy;	/* ratelimiting */
6579
6580	rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
6581	if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
6582	    system_state != SYSTEM_RUNNING || oops_in_progress)
6583		return;
6584	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
6585		return;
6586	prev_jiffy = jiffies;
6587
6588	printk(KERN_ERR
6589		"BUG: sleeping function called from invalid context at %s:%d\n",
6590			file, line);
6591	printk(KERN_ERR
6592		"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
6593			in_atomic(), irqs_disabled(),
6594			current->pid, current->comm);
6595
6596	debug_show_held_locks(current);
6597	if (irqs_disabled())
6598		print_irqtrace_events(current);
6599	dump_stack();
6600}
6601EXPORT_SYMBOL(__might_sleep);
6602#endif
6603
6604#ifdef CONFIG_MAGIC_SYSRQ
6605static void normalize_task(struct rq *rq, struct task_struct *p)
6606{
6607	const struct sched_class *prev_class = p->sched_class;
6608	int old_prio = p->prio;
6609	int on_rq;
6610
6611	on_rq = p->on_rq;
6612	if (on_rq)
6613		dequeue_task(rq, p, 0);
6614	__setscheduler(rq, p, SCHED_NORMAL, 0);
6615	if (on_rq) {
6616		enqueue_task(rq, p, 0);
6617		resched_task(rq->curr);
6618	}
6619
6620	check_class_changed(rq, p, prev_class, old_prio);
6621}
6622
6623void normalize_rt_tasks(void)
6624{
6625	struct task_struct *g, *p;
6626	unsigned long flags;
6627	struct rq *rq;
6628
6629	read_lock_irqsave(&tasklist_lock, flags);
6630	do_each_thread(g, p) {
6631		/*
6632		 * Only normalize user tasks:
6633		 */
6634		if (!p->mm)
6635			continue;
6636
6637		p->se.exec_start		= 0;
6638#ifdef CONFIG_SCHEDSTATS
6639		p->se.statistics.wait_start	= 0;
6640		p->se.statistics.sleep_start	= 0;
6641		p->se.statistics.block_start	= 0;
6642#endif
6643
6644		if (!rt_task(p)) {
6645			/*
6646			 * Renice negative nice level userspace
6647			 * tasks back to 0:
6648			 */
6649			if (TASK_NICE(p) < 0 && p->mm)
6650				set_user_nice(p, 0);
6651			continue;
6652		}
6653
6654		raw_spin_lock(&p->pi_lock);
6655		rq = __task_rq_lock(p);
6656
6657		normalize_task(rq, p);
6658
6659		__task_rq_unlock(rq);
6660		raw_spin_unlock(&p->pi_lock);
6661	} while_each_thread(g, p);
6662
6663	read_unlock_irqrestore(&tasklist_lock, flags);
6664}
6665
6666#endif /* CONFIG_MAGIC_SYSRQ */
6667
6668#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
6669/*
6670 * These functions are only useful for the IA64 MCA handling, or kdb.
6671 *
6672 * They can only be called when the whole system has been
6673 * stopped - every CPU needs to be quiescent, and no scheduling
6674 * activity can take place. Using them for anything else would
6675 * be a serious bug, and as a result, they aren't even visible
6676 * under any other configuration.
6677 */
6678
6679/**
6680 * curr_task - return the current task for a given cpu.
6681 * @cpu: the processor in question.
6682 *
6683 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6684 *
6685 * Return: The current task for @cpu.
6686 */
6687struct task_struct *curr_task(int cpu)
6688{
6689	return cpu_curr(cpu);
6690}
6691
6692#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
6693
6694#ifdef CONFIG_IA64
6695/**
6696 * set_curr_task - set the current task for a given cpu.
6697 * @cpu: the processor in question.
6698 * @p: the task pointer to set.
6699 *
6700 * Description: This function must only be used when non-maskable interrupts
6701 * are serviced on a separate stack. It allows the architecture to switch the
6702 * notion of the current task on a cpu in a non-blocking manner. This function
6703 * must be called with all CPU's synchronized, and interrupts disabled, the
6704 * and caller must save the original value of the current task (see
6705 * curr_task() above) and restore that value before reenabling interrupts and
6706 * re-starting the system.
6707 *
6708 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6709 */
6710void set_curr_task(int cpu, struct task_struct *p)
6711{
6712	cpu_curr(cpu) = p;
6713}
6714
6715#endif
6716
6717#ifdef CONFIG_CGROUP_SCHED
6718/* task_group_lock serializes the addition/removal of task groups */
6719static DEFINE_SPINLOCK(task_group_lock);
6720
6721static void free_sched_group(struct task_group *tg)
6722{
6723	free_fair_sched_group(tg);
6724	free_rt_sched_group(tg);
6725	autogroup_free(tg);
6726	kfree(tg);
6727}
6728
6729/* allocate runqueue etc for a new task group */
6730struct task_group *sched_create_group(struct task_group *parent)
6731{
6732	struct task_group *tg;
6733
6734	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
6735	if (!tg)
6736		return ERR_PTR(-ENOMEM);
6737
6738	if (!alloc_fair_sched_group(tg, parent))
6739		goto err;
6740
6741	if (!alloc_rt_sched_group(tg, parent))
6742		goto err;
6743
6744	return tg;
6745
6746err:
6747	free_sched_group(tg);
6748	return ERR_PTR(-ENOMEM);
6749}
6750
6751void sched_online_group(struct task_group *tg, struct task_group *parent)
6752{
6753	unsigned long flags;
6754
6755	spin_lock_irqsave(&task_group_lock, flags);
6756	list_add_rcu(&tg->list, &task_groups);
6757
6758	WARN_ON(!parent); /* root should already exist */
6759
6760	tg->parent = parent;
6761	INIT_LIST_HEAD(&tg->children);
6762	list_add_rcu(&tg->siblings, &parent->children);
6763	spin_unlock_irqrestore(&task_group_lock, flags);
6764}
6765
6766/* rcu callback to free various structures associated with a task group */
6767static void free_sched_group_rcu(struct rcu_head *rhp)
6768{
6769	/* now it should be safe to free those cfs_rqs */
6770	free_sched_group(container_of(rhp, struct task_group, rcu));
6771}
6772
6773/* Destroy runqueue etc associated with a task group */
6774void sched_destroy_group(struct task_group *tg)
6775{
6776	/* wait for possible concurrent references to cfs_rqs complete */
6777	call_rcu(&tg->rcu, free_sched_group_rcu);
6778}
6779
6780void sched_offline_group(struct task_group *tg)
6781{
6782	unsigned long flags;
6783	int i;
6784
6785	/* end participation in shares distribution */
6786	for_each_possible_cpu(i)
6787		unregister_fair_sched_group(tg, i);
6788
6789	spin_lock_irqsave(&task_group_lock, flags);
6790	list_del_rcu(&tg->list);
6791	list_del_rcu(&tg->siblings);
6792	spin_unlock_irqrestore(&task_group_lock, flags);
6793}
6794
6795/* change task's runqueue when it moves between groups.
6796 *	The caller of this function should have put the task in its new group
6797 *	by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
6798 *	reflect its new group.
6799 */
6800void sched_move_task(struct task_struct *tsk)
6801{
6802	struct task_group *tg;
6803	int on_rq, running;
6804	unsigned long flags;
6805	struct rq *rq;
6806
6807	rq = task_rq_lock(tsk, &flags);
6808
6809	running = task_current(rq, tsk);
6810	on_rq = tsk->on_rq;
6811
6812	if (on_rq)
6813		dequeue_task(rq, tsk, 0);
6814	if (unlikely(running))
6815		tsk->sched_class->put_prev_task(rq, tsk);
6816
6817	tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id,
6818				lockdep_is_held(&tsk->sighand->siglock)),
6819			  struct task_group, css);
6820	tg = autogroup_task_group(tsk, tg);
6821	tsk->sched_task_group = tg;
6822
6823#ifdef CONFIG_FAIR_GROUP_SCHED
6824	if (tsk->sched_class->task_move_group)
6825		tsk->sched_class->task_move_group(tsk, on_rq);
6826	else
6827#endif
6828		set_task_rq(tsk, task_cpu(tsk));
6829
6830	if (unlikely(running))
6831		tsk->sched_class->set_curr_task(rq);
6832	if (on_rq)
6833		enqueue_task(rq, tsk, 0);
6834
6835	task_rq_unlock(rq, tsk, &flags);
6836}
6837#endif /* CONFIG_CGROUP_SCHED */
6838
6839#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
6840static unsigned long to_ratio(u64 period, u64 runtime)
6841{
6842	if (runtime == RUNTIME_INF)
6843		return 1ULL << 20;
6844
6845	return div64_u64(runtime << 20, period);
6846}
6847#endif
6848
6849#ifdef CONFIG_RT_GROUP_SCHED
6850/*
6851 * Ensure that the real time constraints are schedulable.
6852 */
6853static DEFINE_MUTEX(rt_constraints_mutex);
6854
6855/* Must be called with tasklist_lock held */
6856static inline int tg_has_rt_tasks(struct task_group *tg)
6857{
6858	struct task_struct *g, *p;
6859
6860	do_each_thread(g, p) {
6861		if (rt_task(p) && task_rq(p)->rt.tg == tg)
6862			return 1;
6863	} while_each_thread(g, p);
6864
6865	return 0;
6866}
6867
6868struct rt_schedulable_data {
6869	struct task_group *tg;
6870	u64 rt_period;
6871	u64 rt_runtime;
6872};
6873
6874static int tg_rt_schedulable(struct task_group *tg, void *data)
6875{
6876	struct rt_schedulable_data *d = data;
6877	struct task_group *child;
6878	unsigned long total, sum = 0;
6879	u64 period, runtime;
6880
6881	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
6882	runtime = tg->rt_bandwidth.rt_runtime;
6883
6884	if (tg == d->tg) {
6885		period = d->rt_period;
6886		runtime = d->rt_runtime;
6887	}
6888
6889	/*
6890	 * Cannot have more runtime than the period.
6891	 */
6892	if (runtime > period && runtime != RUNTIME_INF)
6893		return -EINVAL;
6894
6895	/*
6896	 * Ensure we don't starve existing RT tasks.
6897	 */
6898	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
6899		return -EBUSY;
6900
6901	total = to_ratio(period, runtime);
6902
6903	/*
6904	 * Nobody can have more than the global setting allows.
6905	 */
6906	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
6907		return -EINVAL;
6908
6909	/*
6910	 * The sum of our children's runtime should not exceed our own.
6911	 */
6912	list_for_each_entry_rcu(child, &tg->children, siblings) {
6913		period = ktime_to_ns(child->rt_bandwidth.rt_period);
6914		runtime = child->rt_bandwidth.rt_runtime;
6915
6916		if (child == d->tg) {
6917			period = d->rt_period;
6918			runtime = d->rt_runtime;
6919		}
6920
6921		sum += to_ratio(period, runtime);
6922	}
6923
6924	if (sum > total)
6925		return -EINVAL;
6926
6927	return 0;
6928}
6929
6930static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
6931{
6932	int ret;
6933
6934	struct rt_schedulable_data data = {
6935		.tg = tg,
6936		.rt_period = period,
6937		.rt_runtime = runtime,
6938	};
6939
6940	rcu_read_lock();
6941	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
6942	rcu_read_unlock();
6943
6944	return ret;
6945}
6946
6947static int tg_set_rt_bandwidth(struct task_group *tg,
6948		u64 rt_period, u64 rt_runtime)
6949{
6950	int i, err = 0;
6951
6952	mutex_lock(&rt_constraints_mutex);
6953	read_lock(&tasklist_lock);
6954	err = __rt_schedulable(tg, rt_period, rt_runtime);
6955	if (err)
6956		goto unlock;
6957
6958	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
6959	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
6960	tg->rt_bandwidth.rt_runtime = rt_runtime;
6961
6962	for_each_possible_cpu(i) {
6963		struct rt_rq *rt_rq = tg->rt_rq[i];
6964
6965		raw_spin_lock(&rt_rq->rt_runtime_lock);
6966		rt_rq->rt_runtime = rt_runtime;
6967		raw_spin_unlock(&rt_rq->rt_runtime_lock);
6968	}
6969	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
6970unlock:
6971	read_unlock(&tasklist_lock);
6972	mutex_unlock(&rt_constraints_mutex);
6973
6974	return err;
6975}
6976
6977static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
6978{
6979	u64 rt_runtime, rt_period;
6980
6981	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
6982	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
6983	if (rt_runtime_us < 0)
6984		rt_runtime = RUNTIME_INF;
6985
6986	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
6987}
6988
6989static long sched_group_rt_runtime(struct task_group *tg)
6990{
6991	u64 rt_runtime_us;
6992
6993	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
6994		return -1;
6995
6996	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
6997	do_div(rt_runtime_us, NSEC_PER_USEC);
6998	return rt_runtime_us;
6999}
7000
7001static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
7002{
7003	u64 rt_runtime, rt_period;
7004
7005	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
7006	rt_runtime = tg->rt_bandwidth.rt_runtime;
7007
7008	if (rt_period == 0)
7009		return -EINVAL;
7010
7011	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
7012}
7013
7014static long sched_group_rt_period(struct task_group *tg)
7015{
7016	u64 rt_period_us;
7017
7018	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
7019	do_div(rt_period_us, NSEC_PER_USEC);
7020	return rt_period_us;
7021}
7022
7023static int sched_rt_global_constraints(void)
7024{
7025	u64 runtime, period;
7026	int ret = 0;
7027
7028	if (sysctl_sched_rt_period <= 0)
7029		return -EINVAL;
7030
7031	runtime = global_rt_runtime();
7032	period = global_rt_period();
7033
7034	/*
7035	 * Sanity check on the sysctl variables.
7036	 */
7037	if (runtime > period && runtime != RUNTIME_INF)
7038		return -EINVAL;
7039
7040	mutex_lock(&rt_constraints_mutex);
7041	read_lock(&tasklist_lock);
7042	ret = __rt_schedulable(NULL, 0, 0);
7043	read_unlock(&tasklist_lock);
7044	mutex_unlock(&rt_constraints_mutex);
7045
7046	return ret;
7047}
7048
7049static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
7050{
7051	/* Don't accept realtime tasks when there is no way for them to run */
7052	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
7053		return 0;
7054
7055	return 1;
7056}
7057
7058#else /* !CONFIG_RT_GROUP_SCHED */
7059static int sched_rt_global_constraints(void)
7060{
7061	unsigned long flags;
7062	int i;
7063
7064	if (sysctl_sched_rt_period <= 0)
7065		return -EINVAL;
7066
7067	/*
7068	 * There's always some RT tasks in the root group
7069	 * -- migration, kstopmachine etc..
7070	 */
7071	if (sysctl_sched_rt_runtime == 0)
7072		return -EBUSY;
7073
7074	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
7075	for_each_possible_cpu(i) {
7076		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
7077
7078		raw_spin_lock(&rt_rq->rt_runtime_lock);
7079		rt_rq->rt_runtime = global_rt_runtime();
7080		raw_spin_unlock(&rt_rq->rt_runtime_lock);
7081	}
7082	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
7083
7084	return 0;
7085}
7086#endif /* CONFIG_RT_GROUP_SCHED */
7087
7088int sched_rr_handler(struct ctl_table *table, int write,
7089		void __user *buffer, size_t *lenp,
7090		loff_t *ppos)
7091{
7092	int ret;
7093	static DEFINE_MUTEX(mutex);
7094
7095	mutex_lock(&mutex);
7096	ret = proc_dointvec(table, write, buffer, lenp, ppos);
7097	/* make sure that internally we keep jiffies */
7098	/* also, writing zero resets timeslice to default */
7099	if (!ret && write) {
7100		sched_rr_timeslice = sched_rr_timeslice <= 0 ?
7101			RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
7102	}
7103	mutex_unlock(&mutex);
7104	return ret;
7105}
7106
7107int sched_rt_handler(struct ctl_table *table, int write,
7108		void __user *buffer, size_t *lenp,
7109		loff_t *ppos)
7110{
7111	int ret;
7112	int old_period, old_runtime;
7113	static DEFINE_MUTEX(mutex);
7114
7115	mutex_lock(&mutex);
7116	old_period = sysctl_sched_rt_period;
7117	old_runtime = sysctl_sched_rt_runtime;
7118
7119	ret = proc_dointvec(table, write, buffer, lenp, ppos);
7120
7121	if (!ret && write) {
7122		ret = sched_rt_global_constraints();
7123		if (ret) {
7124			sysctl_sched_rt_period = old_period;
7125			sysctl_sched_rt_runtime = old_runtime;
7126		} else {
7127			def_rt_bandwidth.rt_runtime = global_rt_runtime();
7128			def_rt_bandwidth.rt_period =
7129				ns_to_ktime(global_rt_period());
7130		}
7131	}
7132	mutex_unlock(&mutex);
7133
7134	return ret;
7135}
7136
7137#ifdef CONFIG_CGROUP_SCHED
7138
7139static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
7140{
7141	return css ? container_of(css, struct task_group, css) : NULL;
7142}
7143
7144static struct cgroup_subsys_state *
7145cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
7146{
7147	struct task_group *parent = css_tg(parent_css);
7148	struct task_group *tg;
7149
7150	if (!parent) {
7151		/* This is early initialization for the top cgroup */
7152		return &root_task_group.css;
7153	}
7154
7155	tg = sched_create_group(parent);
7156	if (IS_ERR(tg))
7157		return ERR_PTR(-ENOMEM);
7158
7159	return &tg->css;
7160}
7161
7162static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
7163{
7164	struct task_group *tg = css_tg(css);
7165	struct task_group *parent = css_tg(css_parent(css));
7166
7167	if (parent)
7168		sched_online_group(tg, parent);
7169	return 0;
7170}
7171
7172static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
7173{
7174	struct task_group *tg = css_tg(css);
7175
7176	sched_destroy_group(tg);
7177}
7178
7179static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
7180{
7181	struct task_group *tg = css_tg(css);
7182
7183	sched_offline_group(tg);
7184}
7185
7186static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
7187				 struct cgroup_taskset *tset)
7188{
7189	struct task_struct *task;
7190
7191	cgroup_taskset_for_each(task, css, tset) {
7192#ifdef CONFIG_RT_GROUP_SCHED
7193		if (!sched_rt_can_attach(css_tg(css), task))
7194			return -EINVAL;
7195#else
7196		/* We don't support RT-tasks being in separate groups */
7197		if (task->sched_class != &fair_sched_class)
7198			return -EINVAL;
7199#endif
7200	}
7201	return 0;
7202}
7203
7204static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
7205			      struct cgroup_taskset *tset)
7206{
7207	struct task_struct *task;
7208
7209	cgroup_taskset_for_each(task, css, tset)
7210		sched_move_task(task);
7211}
7212
7213static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
7214			    struct cgroup_subsys_state *old_css,
7215			    struct task_struct *task)
7216{
7217	/*
7218	 * cgroup_exit() is called in the copy_process() failure path.
7219	 * Ignore this case since the task hasn't ran yet, this avoids
7220	 * trying to poke a half freed task state from generic code.
7221	 */
7222	if (!(task->flags & PF_EXITING))
7223		return;
7224
7225	sched_move_task(task);
7226}
7227
7228#ifdef CONFIG_FAIR_GROUP_SCHED
7229static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
7230				struct cftype *cftype, u64 shareval)
7231{
7232	return sched_group_set_shares(css_tg(css), scale_load(shareval));
7233}
7234
7235static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
7236			       struct cftype *cft)
7237{
7238	struct task_group *tg = css_tg(css);
7239
7240	return (u64) scale_load_down(tg->shares);
7241}
7242
7243#ifdef CONFIG_CFS_BANDWIDTH
7244static DEFINE_MUTEX(cfs_constraints_mutex);
7245
7246const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
7247const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
7248
7249static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
7250
7251static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
7252{
7253	int i, ret = 0, runtime_enabled, runtime_was_enabled;
7254	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
7255
7256	if (tg == &root_task_group)
7257		return -EINVAL;
7258
7259	/*
7260	 * Ensure we have at some amount of bandwidth every period.  This is
7261	 * to prevent reaching a state of large arrears when throttled via
7262	 * entity_tick() resulting in prolonged exit starvation.
7263	 */
7264	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
7265		return -EINVAL;
7266
7267	/*
7268	 * Likewise, bound things on the otherside by preventing insane quota
7269	 * periods.  This also allows us to normalize in computing quota
7270	 * feasibility.
7271	 */
7272	if (period > max_cfs_quota_period)
7273		return -EINVAL;
7274
7275	mutex_lock(&cfs_constraints_mutex);
7276	ret = __cfs_schedulable(tg, period, quota);
7277	if (ret)
7278		goto out_unlock;
7279
7280	runtime_enabled = quota != RUNTIME_INF;
7281	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
7282	account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
7283	raw_spin_lock_irq(&cfs_b->lock);
7284	cfs_b->period = ns_to_ktime(period);
7285	cfs_b->quota = quota;
7286
7287	__refill_cfs_bandwidth_runtime(cfs_b);
7288	/* restart the period timer (if active) to handle new period expiry */
7289	if (runtime_enabled && cfs_b->timer_active) {
7290		/* force a reprogram */
7291		cfs_b->timer_active = 0;
7292		__start_cfs_bandwidth(cfs_b);
7293	}
7294	raw_spin_unlock_irq(&cfs_b->lock);
7295
7296	for_each_possible_cpu(i) {
7297		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
7298		struct rq *rq = cfs_rq->rq;
7299
7300		raw_spin_lock_irq(&rq->lock);
7301		cfs_rq->runtime_enabled = runtime_enabled;
7302		cfs_rq->runtime_remaining = 0;
7303
7304		if (cfs_rq->throttled)
7305			unthrottle_cfs_rq(cfs_rq);
7306		raw_spin_unlock_irq(&rq->lock);
7307	}
7308out_unlock:
7309	mutex_unlock(&cfs_constraints_mutex);
7310
7311	return ret;
7312}
7313
7314int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
7315{
7316	u64 quota, period;
7317
7318	period = ktime_to_ns(tg->cfs_bandwidth.period);
7319	if (cfs_quota_us < 0)
7320		quota = RUNTIME_INF;
7321	else
7322		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
7323
7324	return tg_set_cfs_bandwidth(tg, period, quota);
7325}
7326
7327long tg_get_cfs_quota(struct task_group *tg)
7328{
7329	u64 quota_us;
7330
7331	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
7332		return -1;
7333
7334	quota_us = tg->cfs_bandwidth.quota;
7335	do_div(quota_us, NSEC_PER_USEC);
7336
7337	return quota_us;
7338}
7339
7340int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
7341{
7342	u64 quota, period;
7343
7344	period = (u64)cfs_period_us * NSEC_PER_USEC;
7345	quota = tg->cfs_bandwidth.quota;
7346
7347	return tg_set_cfs_bandwidth(tg, period, quota);
7348}
7349
7350long tg_get_cfs_period(struct task_group *tg)
7351{
7352	u64 cfs_period_us;
7353
7354	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
7355	do_div(cfs_period_us, NSEC_PER_USEC);
7356
7357	return cfs_period_us;
7358}
7359
7360static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
7361				  struct cftype *cft)
7362{
7363	return tg_get_cfs_quota(css_tg(css));
7364}
7365
7366static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
7367				   struct cftype *cftype, s64 cfs_quota_us)
7368{
7369	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
7370}
7371
7372static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
7373				   struct cftype *cft)
7374{
7375	return tg_get_cfs_period(css_tg(css));
7376}
7377
7378static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
7379				    struct cftype *cftype, u64 cfs_period_us)
7380{
7381	return tg_set_cfs_period(css_tg(css), cfs_period_us);
7382}
7383
7384struct cfs_schedulable_data {
7385	struct task_group *tg;
7386	u64 period, quota;
7387};
7388
7389/*
7390 * normalize group quota/period to be quota/max_period
7391 * note: units are usecs
7392 */
7393static u64 normalize_cfs_quota(struct task_group *tg,
7394			       struct cfs_schedulable_data *d)
7395{
7396	u64 quota, period;
7397
7398	if (tg == d->tg) {
7399		period = d->period;
7400		quota = d->quota;
7401	} else {
7402		period = tg_get_cfs_period(tg);
7403		quota = tg_get_cfs_quota(tg);
7404	}
7405
7406	/* note: these should typically be equivalent */
7407	if (quota == RUNTIME_INF || quota == -1)
7408		return RUNTIME_INF;
7409
7410	return to_ratio(period, quota);
7411}
7412
7413static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
7414{
7415	struct cfs_schedulable_data *d = data;
7416	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
7417	s64 quota = 0, parent_quota = -1;
7418
7419	if (!tg->parent) {
7420		quota = RUNTIME_INF;
7421	} else {
7422		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
7423
7424		quota = normalize_cfs_quota(tg, d);
7425		parent_quota = parent_b->hierarchal_quota;
7426
7427		/*
7428		 * ensure max(child_quota) <= parent_quota, inherit when no
7429		 * limit is set
7430		 */
7431		if (quota == RUNTIME_INF)
7432			quota = parent_quota;
7433		else if (parent_quota != RUNTIME_INF && quota > parent_quota)
7434			return -EINVAL;
7435	}
7436	cfs_b->hierarchal_quota = quota;
7437
7438	return 0;
7439}
7440
7441static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
7442{
7443	int ret;
7444	struct cfs_schedulable_data data = {
7445		.tg = tg,
7446		.period = period,
7447		.quota = quota,
7448	};
7449
7450	if (quota != RUNTIME_INF) {
7451		do_div(data.period, NSEC_PER_USEC);
7452		do_div(data.quota, NSEC_PER_USEC);
7453	}
7454
7455	rcu_read_lock();
7456	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
7457	rcu_read_unlock();
7458
7459	return ret;
7460}
7461
7462static int cpu_stats_show(struct cgroup_subsys_state *css, struct cftype *cft,
7463		struct cgroup_map_cb *cb)
7464{
7465	struct task_group *tg = css_tg(css);
7466	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
7467
7468	cb->fill(cb, "nr_periods", cfs_b->nr_periods);
7469	cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
7470	cb->fill(cb, "throttled_time", cfs_b->throttled_time);
7471
7472	return 0;
7473}
7474#endif /* CONFIG_CFS_BANDWIDTH */
7475#endif /* CONFIG_FAIR_GROUP_SCHED */
7476
7477#ifdef CONFIG_RT_GROUP_SCHED
7478static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
7479				struct cftype *cft, s64 val)
7480{
7481	return sched_group_set_rt_runtime(css_tg(css), val);
7482}
7483
7484static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
7485			       struct cftype *cft)
7486{
7487	return sched_group_rt_runtime(css_tg(css));
7488}
7489
7490static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
7491				    struct cftype *cftype, u64 rt_period_us)
7492{
7493	return sched_group_set_rt_period(css_tg(css), rt_period_us);
7494}
7495
7496static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
7497				   struct cftype *cft)
7498{
7499	return sched_group_rt_period(css_tg(css));
7500}
7501#endif /* CONFIG_RT_GROUP_SCHED */
7502
7503static struct cftype cpu_files[] = {
7504#ifdef CONFIG_FAIR_GROUP_SCHED
7505	{
7506		.name = "shares",
7507		.read_u64 = cpu_shares_read_u64,
7508		.write_u64 = cpu_shares_write_u64,
7509	},
7510#endif
7511#ifdef CONFIG_CFS_BANDWIDTH
7512	{
7513		.name = "cfs_quota_us",
7514		.read_s64 = cpu_cfs_quota_read_s64,
7515		.write_s64 = cpu_cfs_quota_write_s64,
7516	},
7517	{
7518		.name = "cfs_period_us",
7519		.read_u64 = cpu_cfs_period_read_u64,
7520		.write_u64 = cpu_cfs_period_write_u64,
7521	},
7522	{
7523		.name = "stat",
7524		.read_map = cpu_stats_show,
7525	},
7526#endif
7527#ifdef CONFIG_RT_GROUP_SCHED
7528	{
7529		.name = "rt_runtime_us",
7530		.read_s64 = cpu_rt_runtime_read,
7531		.write_s64 = cpu_rt_runtime_write,
7532	},
7533	{
7534		.name = "rt_period_us",
7535		.read_u64 = cpu_rt_period_read_uint,
7536		.write_u64 = cpu_rt_period_write_uint,
7537	},
7538#endif
7539	{ }	/* terminate */
7540};
7541
7542struct cgroup_subsys cpu_cgroup_subsys = {
7543	.name		= "cpu",
7544	.css_alloc	= cpu_cgroup_css_alloc,
7545	.css_free	= cpu_cgroup_css_free,
7546	.css_online	= cpu_cgroup_css_online,
7547	.css_offline	= cpu_cgroup_css_offline,
7548	.can_attach	= cpu_cgroup_can_attach,
7549	.attach		= cpu_cgroup_attach,
7550	.exit		= cpu_cgroup_exit,
7551	.subsys_id	= cpu_cgroup_subsys_id,
7552	.base_cftypes	= cpu_files,
7553	.early_init	= 1,
7554};
7555
7556#endif	/* CONFIG_CGROUP_SCHED */
7557
7558void dump_cpu_task(int cpu)
7559{
7560	pr_info("Task dump for CPU %d:\n", cpu);
7561	sched_show_task(cpu_curr(cpu));
7562}
7563