core.c revision a15b12ac36ad4e7b856a4ae54937ae26a51aebad
1/*
2 *  kernel/sched/core.c
3 *
4 *  Kernel scheduler and related syscalls
5 *
6 *  Copyright (C) 1991-2002  Linus Torvalds
7 *
8 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
9 *		make semaphores SMP safe
10 *  1998-11-19	Implemented schedule_timeout() and related stuff
11 *		by Andrea Arcangeli
12 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
13 *		hybrid priority-list and round-robin design with
14 *		an array-switch method of distributing timeslices
15 *		and per-CPU runqueues.  Cleanups and useful suggestions
16 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
17 *  2003-09-03	Interactivity tuning by Con Kolivas.
18 *  2004-04-02	Scheduler domains code by Nick Piggin
19 *  2007-04-15  Work begun on replacing all interactivity tuning with a
20 *              fair scheduling design by Con Kolivas.
21 *  2007-05-05  Load balancing (smp-nice) and other improvements
22 *              by Peter Williams
23 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
24 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
25 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 *              Thomas Gleixner, Mike Kravetz
27 */
28
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/nmi.h>
32#include <linux/init.h>
33#include <linux/uaccess.h>
34#include <linux/highmem.h>
35#include <asm/mmu_context.h>
36#include <linux/interrupt.h>
37#include <linux/capability.h>
38#include <linux/completion.h>
39#include <linux/kernel_stat.h>
40#include <linux/debug_locks.h>
41#include <linux/perf_event.h>
42#include <linux/security.h>
43#include <linux/notifier.h>
44#include <linux/profile.h>
45#include <linux/freezer.h>
46#include <linux/vmalloc.h>
47#include <linux/blkdev.h>
48#include <linux/delay.h>
49#include <linux/pid_namespace.h>
50#include <linux/smp.h>
51#include <linux/threads.h>
52#include <linux/timer.h>
53#include <linux/rcupdate.h>
54#include <linux/cpu.h>
55#include <linux/cpuset.h>
56#include <linux/percpu.h>
57#include <linux/proc_fs.h>
58#include <linux/seq_file.h>
59#include <linux/sysctl.h>
60#include <linux/syscalls.h>
61#include <linux/times.h>
62#include <linux/tsacct_kern.h>
63#include <linux/kprobes.h>
64#include <linux/delayacct.h>
65#include <linux/unistd.h>
66#include <linux/pagemap.h>
67#include <linux/hrtimer.h>
68#include <linux/tick.h>
69#include <linux/debugfs.h>
70#include <linux/ctype.h>
71#include <linux/ftrace.h>
72#include <linux/slab.h>
73#include <linux/init_task.h>
74#include <linux/binfmts.h>
75#include <linux/context_tracking.h>
76#include <linux/compiler.h>
77
78#include <asm/switch_to.h>
79#include <asm/tlb.h>
80#include <asm/irq_regs.h>
81#include <asm/mutex.h>
82#ifdef CONFIG_PARAVIRT
83#include <asm/paravirt.h>
84#endif
85
86#include "sched.h"
87#include "../workqueue_internal.h"
88#include "../smpboot.h"
89
90#define CREATE_TRACE_POINTS
91#include <trace/events/sched.h>
92
93#ifdef smp_mb__before_atomic
94void __smp_mb__before_atomic(void)
95{
96	smp_mb__before_atomic();
97}
98EXPORT_SYMBOL(__smp_mb__before_atomic);
99#endif
100
101#ifdef smp_mb__after_atomic
102void __smp_mb__after_atomic(void)
103{
104	smp_mb__after_atomic();
105}
106EXPORT_SYMBOL(__smp_mb__after_atomic);
107#endif
108
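/*
 * Arm @period_timer for its next period unless it is already active:
 * forward its expiry past the current time in whole periods and start
 * it again, preserving the soft/hard expiry range. (For context, the
 * RT and CFS bandwidth code use this to (re)start their periodic
 * refill timers.)
 */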
109void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
110{
111	unsigned long delta;
112	ktime_t soft, hard, now;
113
114	for (;;) {
115		if (hrtimer_active(period_timer))
116			break;
117
118		now = hrtimer_cb_get_time(period_timer);
119		hrtimer_forward(period_timer, now, period);
120
121		soft = hrtimer_get_softexpires(period_timer);
122		hard = hrtimer_get_expires(period_timer);
123		delta = ktime_to_ns(ktime_sub(hard, soft));
124		__hrtimer_start_range_ns(period_timer, soft, delta,
125					 HRTIMER_MODE_ABS_PINNED, 0);
126	}
127}
128
129DEFINE_MUTEX(sched_domains_mutex);
130DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
131
132static void update_rq_clock_task(struct rq *rq, s64 delta);
133
134void update_rq_clock(struct rq *rq)
135{
136	s64 delta;
137
138	if (rq->skip_clock_update > 0)
139		return;
140
141	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
142	if (delta < 0)
143		return;
144	rq->clock += delta;
145	update_rq_clock_task(rq, delta);
146}
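/*
 * Note that update_rq_clock() above is a no-op while
 * rq->skip_clock_update > 0; check_preempt_curr() below sets that flag
 * when a reschedule is already pending, so the clock update that would
 * immediately precede schedule() can be skipped.
 */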
147
148/*
149 * Debugging: various feature bits
150 */
151
152#define SCHED_FEAT(name, enabled)	\
153	(1UL << __SCHED_FEAT_##name) * enabled |
154
155const_debug unsigned int sysctl_sched_features =
156#include "features.h"
157	0;
158
159#undef SCHED_FEAT
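/*
 * The SCHED_FEAT() definition above turns features.h into one OR-ed
 * bitmask. As an illustration (assuming a feature line like
 * SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) in features.h), the include
 * expands to:
 *
 *	(1UL << __SCHED_FEAT_GENTLE_FAIR_SLEEPERS) * true |
 *	...
 *	0;
 *
 * so every feature compiled in as 'true' has its bit set in
 * sysctl_sched_features by default.
 */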
160
161#ifdef CONFIG_SCHED_DEBUG
162#define SCHED_FEAT(name, enabled)	\
163	#name ,
164
165static const char * const sched_feat_names[] = {
166#include "features.h"
167};
168
169#undef SCHED_FEAT
170
171static int sched_feat_show(struct seq_file *m, void *v)
172{
173	int i;
174
175	for (i = 0; i < __SCHED_FEAT_NR; i++) {
176		if (!(sysctl_sched_features & (1UL << i)))
177			seq_puts(m, "NO_");
178		seq_printf(m, "%s ", sched_feat_names[i]);
179	}
180	seq_puts(m, "\n");
181
182	return 0;
183}
184
185#ifdef HAVE_JUMP_LABEL
186
187#define jump_label_key__true  STATIC_KEY_INIT_TRUE
188#define jump_label_key__false STATIC_KEY_INIT_FALSE
189
190#define SCHED_FEAT(name, enabled)	\
191	jump_label_key__##enabled ,
192
193struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
194#include "features.h"
195};
196
197#undef SCHED_FEAT
198
199static void sched_feat_disable(int i)
200{
201	if (static_key_enabled(&sched_feat_keys[i]))
202		static_key_slow_dec(&sched_feat_keys[i]);
203}
204
205static void sched_feat_enable(int i)
206{
207	if (!static_key_enabled(&sched_feat_keys[i]))
208		static_key_slow_inc(&sched_feat_keys[i]);
209}
210#else
211static void sched_feat_disable(int i) { };
212static void sched_feat_enable(int i) { };
213#endif /* HAVE_JUMP_LABEL */
214
215static int sched_feat_set(char *cmp)
216{
217	int i;
218	int neg = 0;
219
220	if (strncmp(cmp, "NO_", 3) == 0) {
221		neg = 1;
222		cmp += 3;
223	}
224
225	for (i = 0; i < __SCHED_FEAT_NR; i++) {
226		if (strcmp(cmp, sched_feat_names[i]) == 0) {
227			if (neg) {
228				sysctl_sched_features &= ~(1UL << i);
229				sched_feat_disable(i);
230			} else {
231				sysctl_sched_features |= (1UL << i);
232				sched_feat_enable(i);
233			}
234			break;
235		}
236	}
237
238	return i;
239}
240
241static ssize_t
242sched_feat_write(struct file *filp, const char __user *ubuf,
243		size_t cnt, loff_t *ppos)
244{
245	char buf[64];
246	char *cmp;
247	int i;
248	struct inode *inode;
249
250	if (cnt > 63)
251		cnt = 63;
252
253	if (copy_from_user(&buf, ubuf, cnt))
254		return -EFAULT;
255
256	buf[cnt] = 0;
257	cmp = strstrip(buf);
258
259	/* Ensure the static_key remains in a consistent state */
260	inode = file_inode(filp);
261	mutex_lock(&inode->i_mutex);
262	i = sched_feat_set(cmp);
263	mutex_unlock(&inode->i_mutex);
264	if (i == __SCHED_FEAT_NR)
265		return -EINVAL;
266
267	*ppos += cnt;
268
269	return cnt;
270}
271
272static int sched_feat_open(struct inode *inode, struct file *filp)
273{
274	return single_open(filp, sched_feat_show, NULL);
275}
276
277static const struct file_operations sched_feat_fops = {
278	.open		= sched_feat_open,
279	.write		= sched_feat_write,
280	.read		= seq_read,
281	.llseek		= seq_lseek,
282	.release	= single_release,
283};
284
285static __init int sched_init_debug(void)
286{
287	debugfs_create_file("sched_features", 0644, NULL, NULL,
288			&sched_feat_fops);
289
290	return 0;
291}
292late_initcall(sched_init_debug);
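/*
 * The resulting debugfs file can be used to flip scheduler features at
 * runtime, e.g. (sketch; the debugfs mount point may vary):
 *
 *	cat /sys/kernel/debug/sched_features
 *	echo NO_HRTICK > /sys/kernel/debug/sched_features
 *
 * Writing "NAME" enables a feature and "NO_NAME" disables it, see
 * sched_feat_set() above.
 */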
293#endif /* CONFIG_SCHED_DEBUG */
294
295/*
296 * Number of tasks to iterate in a single balance run.
297 * Limited because this is done with IRQs disabled.
298 */
299const_debug unsigned int sysctl_sched_nr_migrate = 32;
300
301/*
302 * period over which we average the RT time consumption, measured
303 * in ms.
304 *
305 * default: 1s
306 */
307const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
308
309/*
310 * period over which we measure -rt task cpu usage in us.
311 * default: 1s
312 */
313unsigned int sysctl_sched_rt_period = 1000000;
314
315__read_mostly int scheduler_running;
316
317/*
318 * part of the period that we allow rt tasks to run in us.
319 * default: 0.95s
320 */
321int sysctl_sched_rt_runtime = 950000;
322
323/*
324 * __task_rq_lock - lock the rq @p resides on.
325 */
326static inline struct rq *__task_rq_lock(struct task_struct *p)
327	__acquires(rq->lock)
328{
329	struct rq *rq;
330
331	lockdep_assert_held(&p->pi_lock);
332
333	for (;;) {
334		rq = task_rq(p);
335		raw_spin_lock(&rq->lock);
336		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
337			return rq;
338		raw_spin_unlock(&rq->lock);
339
340		while (unlikely(task_on_rq_migrating(p)))
341			cpu_relax();
342	}
343}
344
345/*
346 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
347 */
348static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
349	__acquires(p->pi_lock)
350	__acquires(rq->lock)
351{
352	struct rq *rq;
353
354	for (;;) {
355		raw_spin_lock_irqsave(&p->pi_lock, *flags);
356		rq = task_rq(p);
357		raw_spin_lock(&rq->lock);
358		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
359			return rq;
360		raw_spin_unlock(&rq->lock);
361		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
362
363		while (unlikely(task_on_rq_migrating(p)))
364			cpu_relax();
365	}
366}
367
368static void __task_rq_unlock(struct rq *rq)
369	__releases(rq->lock)
370{
371	raw_spin_unlock(&rq->lock);
372}
373
374static inline void
375task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
376	__releases(rq->lock)
377	__releases(p->pi_lock)
378{
379	raw_spin_unlock(&rq->lock);
380	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
381}
382
383/*
384 * this_rq_lock - lock this runqueue and disable interrupts.
385 */
386static struct rq *this_rq_lock(void)
387	__acquires(rq->lock)
388{
389	struct rq *rq;
390
391	local_irq_disable();
392	rq = this_rq();
393	raw_spin_lock(&rq->lock);
394
395	return rq;
396}
397
398#ifdef CONFIG_SCHED_HRTICK
399/*
400 * Use HR-timers to deliver accurate preemption points.
401 */
402
403static void hrtick_clear(struct rq *rq)
404{
405	if (hrtimer_active(&rq->hrtick_timer))
406		hrtimer_cancel(&rq->hrtick_timer);
407}
408
409/*
410 * High-resolution timer tick.
411 * Runs from hardirq context with interrupts disabled.
412 */
413static enum hrtimer_restart hrtick(struct hrtimer *timer)
414{
415	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
416
417	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
418
419	raw_spin_lock(&rq->lock);
420	update_rq_clock(rq);
421	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
422	raw_spin_unlock(&rq->lock);
423
424	return HRTIMER_NORESTART;
425}
426
427#ifdef CONFIG_SMP
428
429static int __hrtick_restart(struct rq *rq)
430{
431	struct hrtimer *timer = &rq->hrtick_timer;
432	ktime_t time = hrtimer_get_softexpires(timer);
433
434	return __hrtimer_start_range_ns(timer, time, 0, HRTIMER_MODE_ABS_PINNED, 0);
435}
436
437/*
438 * called from hardirq (IPI) context
439 */
440static void __hrtick_start(void *arg)
441{
442	struct rq *rq = arg;
443
444	raw_spin_lock(&rq->lock);
445	__hrtick_restart(rq);
446	rq->hrtick_csd_pending = 0;
447	raw_spin_unlock(&rq->lock);
448}
449
450/*
451 * Called to set the hrtick timer state.
452 *
453 * called with rq->lock held and irqs disabled
454 */
455void hrtick_start(struct rq *rq, u64 delay)
456{
457	struct hrtimer *timer = &rq->hrtick_timer;
458	ktime_t time;
459	s64 delta;
460
461	/*
462	 * Don't schedule slices shorter than 10000ns; that just
463	 * doesn't make sense and can cause timer DoS.
464	 */
465	delta = max_t(s64, delay, 10000LL);
466	time = ktime_add_ns(timer->base->get_time(), delta);
467
468	hrtimer_set_expires(timer, time);
469
470	if (rq == this_rq()) {
471		__hrtick_restart(rq);
472	} else if (!rq->hrtick_csd_pending) {
473		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
474		rq->hrtick_csd_pending = 1;
475	}
476}
477
478static int
479hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
480{
481	int cpu = (int)(long)hcpu;
482
483	switch (action) {
484	case CPU_UP_CANCELED:
485	case CPU_UP_CANCELED_FROZEN:
486	case CPU_DOWN_PREPARE:
487	case CPU_DOWN_PREPARE_FROZEN:
488	case CPU_DEAD:
489	case CPU_DEAD_FROZEN:
490		hrtick_clear(cpu_rq(cpu));
491		return NOTIFY_OK;
492	}
493
494	return NOTIFY_DONE;
495}
496
497static __init void init_hrtick(void)
498{
499	hotcpu_notifier(hotplug_hrtick, 0);
500}
501#else
502/*
503 * Called to set the hrtick timer state.
504 *
505 * called with rq->lock held and irqs disabled
506 */
507void hrtick_start(struct rq *rq, u64 delay)
508{
509	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
510			HRTIMER_MODE_REL_PINNED, 0);
511}
512
513static inline void init_hrtick(void)
514{
515}
516#endif /* CONFIG_SMP */
517
518static void init_rq_hrtick(struct rq *rq)
519{
520#ifdef CONFIG_SMP
521	rq->hrtick_csd_pending = 0;
522
523	rq->hrtick_csd.flags = 0;
524	rq->hrtick_csd.func = __hrtick_start;
525	rq->hrtick_csd.info = rq;
526#endif
527
528	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
529	rq->hrtick_timer.function = hrtick;
530}
531#else	/* CONFIG_SCHED_HRTICK */
532static inline void hrtick_clear(struct rq *rq)
533{
534}
535
536static inline void init_rq_hrtick(struct rq *rq)
537{
538}
539
540static inline void init_hrtick(void)
541{
542}
543#endif	/* CONFIG_SCHED_HRTICK */
544
545/*
546 * cmpxchg based fetch_or, macro so it works for different integer types
547 */
548#define fetch_or(ptr, val)						\
549({	typeof(*(ptr)) __old, __val = *(ptr);				\
550 	for (;;) {							\
551 		__old = cmpxchg((ptr), __val, __val | (val));		\
552 		if (__old == __val)					\
553 			break;						\
554 		__val = __old;						\
555 	}								\
556 	__old;								\
557})
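/*
 * fetch_or() returns the value *(ptr) held _before_ the OR. For
 * example (sketch): if ti->flags only had _TIF_POLLING_NRFLAG set, then
 *
 *	old = fetch_or(&ti->flags, _TIF_NEED_RESCHED);
 *
 * leaves both bits set and returns just _TIF_POLLING_NRFLAG in old,
 * which is what set_nr_and_not_polling() below relies on.
 */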
558
559#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
560/*
561 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
562 * this avoids any races wrt polling state changes and thereby avoids
563 * spurious IPIs.
564 */
565static bool set_nr_and_not_polling(struct task_struct *p)
566{
567	struct thread_info *ti = task_thread_info(p);
568	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
569}
570
571/*
572 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
573 *
574 * If this returns true, then the idle task promises to call
575 * sched_ttwu_pending() and reschedule soon.
576 */
577static bool set_nr_if_polling(struct task_struct *p)
578{
579	struct thread_info *ti = task_thread_info(p);
580	typeof(ti->flags) old, val = ACCESS_ONCE(ti->flags);
581
582	for (;;) {
583		if (!(val & _TIF_POLLING_NRFLAG))
584			return false;
585		if (val & _TIF_NEED_RESCHED)
586			return true;
587		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
588		if (old == val)
589			break;
590		val = old;
591	}
592	return true;
593}
594
595#else
596static bool set_nr_and_not_polling(struct task_struct *p)
597{
598	set_tsk_need_resched(p);
599	return true;
600}
601
602#ifdef CONFIG_SMP
603static bool set_nr_if_polling(struct task_struct *p)
604{
605	return false;
606}
607#endif
608#endif
609
610/*
611 * resched_curr - mark rq's current task 'to be rescheduled now'.
612 *
613 * On UP this means the setting of the need_resched flag, on SMP it
614 * might also involve a cross-CPU call to trigger the scheduler on
615 * the target CPU.
616 */
617void resched_curr(struct rq *rq)
618{
619	struct task_struct *curr = rq->curr;
620	int cpu;
621
622	lockdep_assert_held(&rq->lock);
623
624	if (test_tsk_need_resched(curr))
625		return;
626
627	cpu = cpu_of(rq);
628
629	if (cpu == smp_processor_id()) {
630		set_tsk_need_resched(curr);
631		set_preempt_need_resched();
632		return;
633	}
634
635	if (set_nr_and_not_polling(curr))
636		smp_send_reschedule(cpu);
637	else
638		trace_sched_wake_idle_without_ipi(cpu);
639}
640
641void resched_cpu(int cpu)
642{
643	struct rq *rq = cpu_rq(cpu);
644	unsigned long flags;
645
646	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
647		return;
648	resched_curr(rq);
649	raw_spin_unlock_irqrestore(&rq->lock, flags);
650}
651
652#ifdef CONFIG_SMP
653#ifdef CONFIG_NO_HZ_COMMON
654/*
655 * In the semi idle case, use the nearest busy cpu for migrating timers
656 * from an idle cpu.  This is good for power-savings.
657 *
658 * We don't do a similar optimization for a completely idle system, as
659 * selecting an idle cpu will add more delays to the timers than intended
660 * (as that cpu's timer base may not be up to date wrt jiffies etc).
661 */
662int get_nohz_timer_target(int pinned)
663{
664	int cpu = smp_processor_id();
665	int i;
666	struct sched_domain *sd;
667
668	if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu))
669		return cpu;
670
671	rcu_read_lock();
672	for_each_domain(cpu, sd) {
673		for_each_cpu(i, sched_domain_span(sd)) {
674			if (!idle_cpu(i)) {
675				cpu = i;
676				goto unlock;
677			}
678		}
679	}
680unlock:
681	rcu_read_unlock();
682	return cpu;
683}
684/*
685 * When add_timer_on() enqueues a timer into the timer wheel of an
686 * idle CPU then this timer might expire before the next timer event
687 * which is scheduled to wake up that CPU. In case of a completely
688 * idle system the next event might even be infinite time into the
689 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
690 * leaves the inner idle loop so the newly added timer is taken into
691 * account when the CPU goes back to idle and evaluates the timer
692 * wheel for the next timer event.
693 */
694static void wake_up_idle_cpu(int cpu)
695{
696	struct rq *rq = cpu_rq(cpu);
697
698	if (cpu == smp_processor_id())
699		return;
700
701	if (set_nr_and_not_polling(rq->idle))
702		smp_send_reschedule(cpu);
703	else
704		trace_sched_wake_idle_without_ipi(cpu);
705}
706
707static bool wake_up_full_nohz_cpu(int cpu)
708{
709	/*
710	 * We just need the target to call irq_exit() and re-evaluate
711	 * the next tick. The nohz full kick at least implies that.
712	 * If needed we can still optimize that later with an
713	 * empty IRQ.
714	 */
715	if (tick_nohz_full_cpu(cpu)) {
716		if (cpu != smp_processor_id() ||
717		    tick_nohz_tick_stopped())
718			tick_nohz_full_kick_cpu(cpu);
719		return true;
720	}
721
722	return false;
723}
724
725void wake_up_nohz_cpu(int cpu)
726{
727	if (!wake_up_full_nohz_cpu(cpu))
728		wake_up_idle_cpu(cpu);
729}
730
731static inline bool got_nohz_idle_kick(void)
732{
733	int cpu = smp_processor_id();
734
735	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
736		return false;
737
738	if (idle_cpu(cpu) && !need_resched())
739		return true;
740
741	/*
742	 * We can't run the idle load balance on this CPU right now, so we
743	 * cancel it and clear NOHZ_BALANCE_KICK
744	 */
745	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
746	return false;
747}
748
749#else /* CONFIG_NO_HZ_COMMON */
750
751static inline bool got_nohz_idle_kick(void)
752{
753	return false;
754}
755
756#endif /* CONFIG_NO_HZ_COMMON */
757
758#ifdef CONFIG_NO_HZ_FULL
759bool sched_can_stop_tick(void)
760{
761	/*
762	 * More than one runnable task means we need preemption, so the
763	 * tick cannot be stopped. The nr_running update is assumed to be
764	 * visible after the IPI is sent from the wakers.
765	 */
766	if (this_rq()->nr_running > 1)
767		return false;
768
769	return true;
770}
771#endif /* CONFIG_NO_HZ_FULL */
772
773void sched_avg_update(struct rq *rq)
774{
775	s64 period = sched_avg_period();
776
777	while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
778		/*
779		 * Inline assembly required to prevent the compiler
780		 * optimising this loop into a divmod call.
781		 * See __iter_div_u64_rem() for another example of this.
782		 */
783		asm("" : "+rm" (rq->age_stamp));
784		rq->age_stamp += period;
785		rq->rt_avg /= 2;
786	}
787}
788
789#endif /* CONFIG_SMP */
790
791#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
792			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
793/*
794 * Iterate task_group tree rooted at *from, calling @down when first entering a
795 * node and @up when leaving it for the final time.
796 *
797 * Caller must hold rcu_lock or sufficient equivalent.
798 */
799int walk_tg_tree_from(struct task_group *from,
800			     tg_visitor down, tg_visitor up, void *data)
801{
802	struct task_group *parent, *child;
803	int ret;
804
805	parent = from;
806
807down:
808	ret = (*down)(parent, data);
809	if (ret)
810		goto out;
811	list_for_each_entry_rcu(child, &parent->children, siblings) {
812		parent = child;
813		goto down;
814
815up:
816		continue;
817	}
818	ret = (*up)(parent, data);
819	if (ret || parent == from)
820		goto out;
821
822	child = parent;
823	parent = parent->parent;
824	if (parent)
825		goto up;
826out:
827	return ret;
828}
829
830int tg_nop(struct task_group *tg, void *data)
831{
832	return 0;
833}
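/*
 * tg_nop() is a convenience visitor for walk_tg_tree_from() when only
 * one direction matters. For example (sketch; tg_visit_down() is a
 * hypothetical down-visitor):
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, tg_visit_down, tg_nop, data);
 *	rcu_read_unlock();
 */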
834#endif
835
836static void set_load_weight(struct task_struct *p)
837{
838	int prio = p->static_prio - MAX_RT_PRIO;
839	struct load_weight *load = &p->se.load;
840
841	/*
842	 * SCHED_IDLE tasks get minimal weight:
843	 */
844	if (p->policy == SCHED_IDLE) {
845		load->weight = scale_load(WEIGHT_IDLEPRIO);
846		load->inv_weight = WMULT_IDLEPRIO;
847		return;
848	}
849
850	load->weight = scale_load(prio_to_weight[prio]);
851	load->inv_weight = prio_to_wmult[prio];
852}
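/*
 * For reference: prio_to_weight[] maps nice 0 to a weight of 1024, and
 * each nice step scales the weight by roughly 1.25x (e.g. nice -1 ->
 * 1277, nice 1 -> 820, nice 19 -> 15), while SCHED_IDLE tasks get the
 * tiny WEIGHT_IDLEPRIO above.
 */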
853
854static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
855{
856	update_rq_clock(rq);
857	sched_info_queued(rq, p);
858	p->sched_class->enqueue_task(rq, p, flags);
859}
860
861static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
862{
863	update_rq_clock(rq);
864	sched_info_dequeued(rq, p);
865	p->sched_class->dequeue_task(rq, p, flags);
866}
867
868void activate_task(struct rq *rq, struct task_struct *p, int flags)
869{
870	if (task_contributes_to_load(p))
871		rq->nr_uninterruptible--;
872
873	enqueue_task(rq, p, flags);
874}
875
876void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
877{
878	if (task_contributes_to_load(p))
879		rq->nr_uninterruptible++;
880
881	dequeue_task(rq, p, flags);
882}
883
884static void update_rq_clock_task(struct rq *rq, s64 delta)
885{
886/*
887 * In theory, the compiler should just see 0 here, and optimize out the call
888 * to sched_rt_avg_update. But I don't trust it...
889 */
890#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
891	s64 steal = 0, irq_delta = 0;
892#endif
893#ifdef CONFIG_IRQ_TIME_ACCOUNTING
894	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
895
896	/*
897	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
898	 * this case when a previous update_rq_clock() happened inside a
899	 * {soft,}irq region.
900	 *
901	 * When this happens, we stop ->clock_task and only update the
902	 * prev_irq_time stamp to account for the part that fit, so that a next
903	 * update will consume the rest. This ensures ->clock_task is
904	 * monotonic.
905	 *
906	 * It does however cause some slight misattribution of {soft,}irq
907	 * time; a more accurate solution would be to update the irq_time using
908	 * the current rq->clock timestamp, except that would require using
909	 * atomic ops.
910	 */
911	if (irq_delta > delta)
912		irq_delta = delta;
913
914	rq->prev_irq_time += irq_delta;
915	delta -= irq_delta;
916#endif
917#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
918	if (static_key_false((&paravirt_steal_rq_enabled))) {
919		steal = paravirt_steal_clock(cpu_of(rq));
920		steal -= rq->prev_steal_time_rq;
921
922		if (unlikely(steal > delta))
923			steal = delta;
924
925		rq->prev_steal_time_rq += steal;
926		delta -= steal;
927	}
928#endif
929
930	rq->clock_task += delta;
931
932#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
933	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
934		sched_rt_avg_update(rq, irq_delta + steal);
935#endif
936}
937
938void sched_set_stop_task(int cpu, struct task_struct *stop)
939{
940	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
941	struct task_struct *old_stop = cpu_rq(cpu)->stop;
942
943	if (stop) {
944		/*
945		 * Make it appear like a SCHED_FIFO task; it's something
946		 * userspace knows about and won't be confused by.
947		 *
948		 * Also, it will make PI more or less work without too
949		 * much confusion -- but then, stop work should not
950		 * rely on PI working anyway.
951		 */
952		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
953
954		stop->sched_class = &stop_sched_class;
955	}
956
957	cpu_rq(cpu)->stop = stop;
958
959	if (old_stop) {
960		/*
961		 * Reset it back to a normal scheduling class so that
962		 * it can die in pieces.
963		 */
964		old_stop->sched_class = &rt_sched_class;
965	}
966}
967
968/*
969 * __normal_prio - return the priority that is based on the static prio
970 */
971static inline int __normal_prio(struct task_struct *p)
972{
973	return p->static_prio;
974}
975
976/*
977 * Calculate the expected normal priority: i.e. priority
978 * without taking RT-inheritance into account. Might be
979 * boosted by interactivity modifiers. Changes upon fork,
980 * setprio syscalls, and whenever the interactivity
981 * estimator recalculates.
982 */
983static inline int normal_prio(struct task_struct *p)
984{
985	int prio;
986
987	if (task_has_dl_policy(p))
988		prio = MAX_DL_PRIO-1;
989	else if (task_has_rt_policy(p))
990		prio = MAX_RT_PRIO-1 - p->rt_priority;
991	else
992		prio = __normal_prio(p);
993	return prio;
994}
995
996/*
997 * Calculate the current priority, i.e. the priority
998 * taken into account by the scheduler. This value might
999 * be boosted by RT tasks, or might be boosted by
1000 * interactivity modifiers. Will be RT if the task got
1001 * RT-boosted. If not then it returns p->normal_prio.
1002 */
1003static int effective_prio(struct task_struct *p)
1004{
1005	p->normal_prio = normal_prio(p);
1006	/*
1007	 * If we are an RT task or were boosted to RT priority,
1008	 * keep the priority unchanged. Otherwise, update priority
1009	 * to the normal priority:
1010	 */
1011	if (!rt_prio(p->prio))
1012		return p->normal_prio;
1013	return p->prio;
1014}
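/*
 * Priority ranges, for reference: deadline tasks get MAX_DL_PRIO-1
 * (i.e. -1), realtime tasks 0..MAX_RT_PRIO-1 (lower value means higher
 * priority), and normal tasks MAX_RT_PRIO..MAX_PRIO-1, i.e. 120 + nice
 * for the default nice range.
 */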
1015
1016/**
1017 * task_curr - is this task currently executing on a CPU?
1018 * @p: the task in question.
1019 *
1020 * Return: 1 if the task is currently executing. 0 otherwise.
1021 */
1022inline int task_curr(const struct task_struct *p)
1023{
1024	return cpu_curr(task_cpu(p)) == p;
1025}
1026
1027static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1028				       const struct sched_class *prev_class,
1029				       int oldprio)
1030{
1031	if (prev_class != p->sched_class) {
1032		if (prev_class->switched_from)
1033			prev_class->switched_from(rq, p);
1034		p->sched_class->switched_to(rq, p);
1035	} else if (oldprio != p->prio || dl_task(p))
1036		p->sched_class->prio_changed(rq, p, oldprio);
1037}
1038
1039void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
1040{
1041	const struct sched_class *class;
1042
1043	if (p->sched_class == rq->curr->sched_class) {
1044		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
1045	} else {
1046		for_each_class(class) {
1047			if (class == rq->curr->sched_class)
1048				break;
1049			if (class == p->sched_class) {
1050				resched_curr(rq);
1051				break;
1052			}
1053		}
1054	}
1055
1056	/*
1057	 * A queue event has occurred, and we're going to schedule.  In
1058	 * this case, we can save a useless back-to-back clock update.
1059	 */
1060	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
1061		rq->skip_clock_update = 1;
1062}
1063
1064#ifdef CONFIG_SMP
1065void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
1066{
1067#ifdef CONFIG_SCHED_DEBUG
1068	/*
1069	 * We should never call set_task_cpu() on a blocked task,
1070	 * ttwu() will sort out the placement.
1071	 */
1072	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
1073			!(task_preempt_count(p) & PREEMPT_ACTIVE));
1074
1075#ifdef CONFIG_LOCKDEP
1076	/*
1077	 * The caller should hold either p->pi_lock or rq->lock when changing
1078	 * a task's CPU: ->pi_lock for waking tasks, rq->lock for runnable tasks.
1079	 *
1080	 * sched_move_task() holds both and thus holding either pins the cgroup,
1081	 * see task_group().
1082	 *
1083	 * Furthermore, all task_rq users should acquire both locks, see
1084	 * task_rq_lock().
1085	 */
1086	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
1087				      lockdep_is_held(&task_rq(p)->lock)));
1088#endif
1089#endif
1090
1091	trace_sched_migrate_task(p, new_cpu);
1092
1093	if (task_cpu(p) != new_cpu) {
1094		if (p->sched_class->migrate_task_rq)
1095			p->sched_class->migrate_task_rq(p, new_cpu);
1096		p->se.nr_migrations++;
1097		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
1098	}
1099
1100	__set_task_cpu(p, new_cpu);
1101}
1102
1103static void __migrate_swap_task(struct task_struct *p, int cpu)
1104{
1105	if (task_on_rq_queued(p)) {
1106		struct rq *src_rq, *dst_rq;
1107
1108		src_rq = task_rq(p);
1109		dst_rq = cpu_rq(cpu);
1110
1111		deactivate_task(src_rq, p, 0);
1112		set_task_cpu(p, cpu);
1113		activate_task(dst_rq, p, 0);
1114		check_preempt_curr(dst_rq, p, 0);
1115	} else {
1116		/*
1117		 * Task isn't running anymore; make it appear like we migrated
1118		 * it before it went to sleep. This means on wakeup we make the
1119		 * previous cpu our target instead of where it really is.
1120		 */
1121		p->wake_cpu = cpu;
1122	}
1123}
1124
1125struct migration_swap_arg {
1126	struct task_struct *src_task, *dst_task;
1127	int src_cpu, dst_cpu;
1128};
1129
1130static int migrate_swap_stop(void *data)
1131{
1132	struct migration_swap_arg *arg = data;
1133	struct rq *src_rq, *dst_rq;
1134	int ret = -EAGAIN;
1135
1136	src_rq = cpu_rq(arg->src_cpu);
1137	dst_rq = cpu_rq(arg->dst_cpu);
1138
1139	double_raw_lock(&arg->src_task->pi_lock,
1140			&arg->dst_task->pi_lock);
1141	double_rq_lock(src_rq, dst_rq);
1142	if (task_cpu(arg->dst_task) != arg->dst_cpu)
1143		goto unlock;
1144
1145	if (task_cpu(arg->src_task) != arg->src_cpu)
1146		goto unlock;
1147
1148	if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
1149		goto unlock;
1150
1151	if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
1152		goto unlock;
1153
1154	__migrate_swap_task(arg->src_task, arg->dst_cpu);
1155	__migrate_swap_task(arg->dst_task, arg->src_cpu);
1156
1157	ret = 0;
1158
1159unlock:
1160	double_rq_unlock(src_rq, dst_rq);
1161	raw_spin_unlock(&arg->dst_task->pi_lock);
1162	raw_spin_unlock(&arg->src_task->pi_lock);
1163
1164	return ret;
1165}
1166
1167/*
1168 * Cross migrate two tasks
1169 */
1170int migrate_swap(struct task_struct *cur, struct task_struct *p)
1171{
1172	struct migration_swap_arg arg;
1173	int ret = -EINVAL;
1174
1175	arg = (struct migration_swap_arg){
1176		.src_task = cur,
1177		.src_cpu = task_cpu(cur),
1178		.dst_task = p,
1179		.dst_cpu = task_cpu(p),
1180	};
1181
1182	if (arg.src_cpu == arg.dst_cpu)
1183		goto out;
1184
1185	/*
1186	 * These three tests are all lockless; this is OK since all of them
1187	 * will be re-checked with proper locks held further down the line.
1188	 */
1189	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
1190		goto out;
1191
1192	if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
1193		goto out;
1194
1195	if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
1196		goto out;
1197
1198	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
1199	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
1200
1201out:
1202	return ret;
1203}
1204
1205struct migration_arg {
1206	struct task_struct *task;
1207	int dest_cpu;
1208};
1209
1210static int migration_cpu_stop(void *data);
1211
1212/*
1213 * wait_task_inactive - wait for a thread to unschedule.
1214 *
1215 * If @match_state is nonzero, it's the @p->state value just checked and
1216 * not expected to change.  If it changes, i.e. @p might have woken up,
1217 * then return zero.  When we succeed in waiting for @p to be off its CPU,
1218 * we return a positive number (its total switch count).  If a second call
1219 * a short while later returns the same number, the caller can be sure that
1220 * @p has remained unscheduled the whole time.
1221 *
1222 * The caller must ensure that the task *will* unschedule sometime soon,
1223 * else this function might spin for a *long* time. This function can't
1224 * be called with interrupts off, or it may introduce deadlock with
1225 * smp_call_function() if an IPI is sent by the same process we are
1226 * waiting to become inactive.
1227 */
1228unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1229{
1230	unsigned long flags;
1231	int running, queued;
1232	unsigned long ncsw;
1233	struct rq *rq;
1234
1235	for (;;) {
1236		/*
1237		 * We do the initial early heuristics without holding
1238		 * any task-queue locks at all. We'll only try to get
1239		 * the runqueue lock when things look like they will
1240		 * work out!
1241		 */
1242		rq = task_rq(p);
1243
1244		/*
1245		 * If the task is actively running on another CPU
1246		 * still, just relax and busy-wait without holding
1247		 * any locks.
1248		 *
1249		 * NOTE! Since we don't hold any locks, we can't even be
1250		 * sure that "rq" stays as the right runqueue!
1251		 * But we don't care, since "task_running()" will
1252		 * return false if the runqueue has changed and p
1253		 * is actually now running somewhere else!
1254		 */
1255		while (task_running(rq, p)) {
1256			if (match_state && unlikely(p->state != match_state))
1257				return 0;
1258			cpu_relax();
1259		}
1260
1261		/*
1262		 * Ok, time to look more closely! We need the rq
1263		 * lock now, to be *sure*. If we're wrong, we'll
1264		 * just go back and repeat.
1265		 */
1266		rq = task_rq_lock(p, &flags);
1267		trace_sched_wait_task(p);
1268		running = task_running(rq, p);
1269		queued = task_on_rq_queued(p);
1270		ncsw = 0;
1271		if (!match_state || p->state == match_state)
1272			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
1273		task_rq_unlock(rq, p, &flags);
1274
1275		/*
1276		 * If it changed from the expected state, bail out now.
1277		 */
1278		if (unlikely(!ncsw))
1279			break;
1280
1281		/*
1282		 * Was it really running after all now that we
1283		 * checked with the proper locks actually held?
1284		 *
1285		 * Oops. Go back and try again..
1286		 */
1287		if (unlikely(running)) {
1288			cpu_relax();
1289			continue;
1290		}
1291
1292		/*
1293		 * It's not enough that it's not actively running,
1294		 * it must be off the runqueue _entirely_, and not
1295		 * preempted!
1296		 *
1297		 * So if it was still runnable (but just not actively
1298		 * running right now), it's preempted, and we should
1299		 * yield - it could be a while.
1300		 */
1301		if (unlikely(queued)) {
1302			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
1303
1304			set_current_state(TASK_UNINTERRUPTIBLE);
1305			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
1306			continue;
1307		}
1308
1309		/*
1310		 * Ahh, all good. It wasn't running, and it wasn't
1311		 * runnable, which means that it will never become
1312		 * running in the future either. We're all done!
1313		 */
1314		break;
1315	}
1316
1317	return ncsw;
1318}
1319
1320/***
1321 * kick_process - kick a running thread to enter/exit the kernel
1322 * @p: the to-be-kicked thread
1323 *
1324 * Cause a process which is running on another CPU to enter
1325 * kernel-mode, without any delay. (to get signals handled.)
1326 *
1327 * NOTE: this function doesn't have to take the runqueue lock,
1328 * because all it wants to ensure is that the remote task enters
1329 * the kernel. If the IPI races and the task has been migrated
1330 * to another CPU then no harm is done and the purpose has been
1331 * achieved as well.
1332 */
1333void kick_process(struct task_struct *p)
1334{
1335	int cpu;
1336
1337	preempt_disable();
1338	cpu = task_cpu(p);
1339	if ((cpu != smp_processor_id()) && task_curr(p))
1340		smp_send_reschedule(cpu);
1341	preempt_enable();
1342}
1343EXPORT_SYMBOL_GPL(kick_process);
1344#endif /* CONFIG_SMP */
1345
1346#ifdef CONFIG_SMP
1347/*
1348 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
1349 */
1350static int select_fallback_rq(int cpu, struct task_struct *p)
1351{
1352	int nid = cpu_to_node(cpu);
1353	const struct cpumask *nodemask = NULL;
1354	enum { cpuset, possible, fail } state = cpuset;
1355	int dest_cpu;
1356
1357	/*
1358	 * If the node that the cpu is on has been offlined, cpu_to_node()
1359	 * will return -1. There is no cpu on the node, and we should
1360	 * select a cpu on another node.
1361	 */
1362	if (nid != -1) {
1363		nodemask = cpumask_of_node(nid);
1364
1365		/* Look for allowed, online CPU in same node. */
1366		for_each_cpu(dest_cpu, nodemask) {
1367			if (!cpu_online(dest_cpu))
1368				continue;
1369			if (!cpu_active(dest_cpu))
1370				continue;
1371			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
1372				return dest_cpu;
1373		}
1374	}
1375
1376	for (;;) {
1377		/* Any allowed, online CPU? */
1378		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
1379			if (!cpu_online(dest_cpu))
1380				continue;
1381			if (!cpu_active(dest_cpu))
1382				continue;
1383			goto out;
1384		}
1385
1386		switch (state) {
1387		case cpuset:
1388			/* No more Mr. Nice Guy. */
1389			cpuset_cpus_allowed_fallback(p);
1390			state = possible;
1391			break;
1392
1393		case possible:
1394			do_set_cpus_allowed(p, cpu_possible_mask);
1395			state = fail;
1396			break;
1397
1398		case fail:
1399			BUG();
1400			break;
1401		}
1402	}
1403
1404out:
1405	if (state != cpuset) {
1406		/*
1407		 * Don't tell them about moving exiting tasks or
1408		 * kernel threads (both mm NULL), since they never
1409		 * leave the kernel.
1410		 */
1411		if (p->mm && printk_ratelimit()) {
1412			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
1413					task_pid_nr(p), p->comm, cpu);
1414		}
1415	}
1416
1417	return dest_cpu;
1418}
1419
1420/*
1421 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
1422 */
1423static inline
1424int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
1425{
1426	cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
1427
1428	/*
1429	 * In order not to call set_task_cpu() on a blocking task we need
1430	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
1431	 * cpu.
1432	 *
1433	 * Since this is common to all placement strategies, this lives here.
1434	 *
1435	 * [ this allows ->select_task_rq() to simply return task_cpu(p) and
1436	 *   not worry about this generic constraint ]
1437	 */
1438	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
1439		     !cpu_online(cpu)))
1440		cpu = select_fallback_rq(task_cpu(p), p);
1441
1442	return cpu;
1443}
1444
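/*
 * update_avg() below keeps a simple exponentially weighted moving
 * average: each new sample contributes 1/8th. E.g. an avg of 800 and a
 * sample of 0 decays the avg to 700.
 */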
1445static void update_avg(u64 *avg, u64 sample)
1446{
1447	s64 diff = sample - *avg;
1448	*avg += diff >> 3;
1449}
1450#endif
1451
1452static void
1453ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
1454{
1455#ifdef CONFIG_SCHEDSTATS
1456	struct rq *rq = this_rq();
1457
1458#ifdef CONFIG_SMP
1459	int this_cpu = smp_processor_id();
1460
1461	if (cpu == this_cpu) {
1462		schedstat_inc(rq, ttwu_local);
1463		schedstat_inc(p, se.statistics.nr_wakeups_local);
1464	} else {
1465		struct sched_domain *sd;
1466
1467		schedstat_inc(p, se.statistics.nr_wakeups_remote);
1468		rcu_read_lock();
1469		for_each_domain(this_cpu, sd) {
1470			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
1471				schedstat_inc(sd, ttwu_wake_remote);
1472				break;
1473			}
1474		}
1475		rcu_read_unlock();
1476	}
1477
1478	if (wake_flags & WF_MIGRATED)
1479		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
1480
1481#endif /* CONFIG_SMP */
1482
1483	schedstat_inc(rq, ttwu_count);
1484	schedstat_inc(p, se.statistics.nr_wakeups);
1485
1486	if (wake_flags & WF_SYNC)
1487		schedstat_inc(p, se.statistics.nr_wakeups_sync);
1488
1489#endif /* CONFIG_SCHEDSTATS */
1490}
1491
1492static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
1493{
1494	activate_task(rq, p, en_flags);
1495	p->on_rq = TASK_ON_RQ_QUEUED;
1496
1497	/* if a worker is waking up, notify workqueue */
1498	if (p->flags & PF_WQ_WORKER)
1499		wq_worker_waking_up(p, cpu_of(rq));
1500}
1501
1502/*
1503 * Mark the task runnable and perform wakeup-preemption.
1504 */
1505static void
1506ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
1507{
1508	check_preempt_curr(rq, p, wake_flags);
1509	trace_sched_wakeup(p, true);
1510
1511	p->state = TASK_RUNNING;
1512#ifdef CONFIG_SMP
1513	if (p->sched_class->task_woken)
1514		p->sched_class->task_woken(rq, p);
1515
1516	if (rq->idle_stamp) {
1517		u64 delta = rq_clock(rq) - rq->idle_stamp;
1518		u64 max = 2*rq->max_idle_balance_cost;
1519
1520		update_avg(&rq->avg_idle, delta);
1521
1522		if (rq->avg_idle > max)
1523			rq->avg_idle = max;
1524
1525		rq->idle_stamp = 0;
1526	}
1527#endif
1528}
1529
1530static void
1531ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
1532{
1533#ifdef CONFIG_SMP
1534	if (p->sched_contributes_to_load)
1535		rq->nr_uninterruptible--;
1536#endif
1537
1538	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
1539	ttwu_do_wakeup(rq, p, wake_flags);
1540}
1541
1542/*
1543 * Called in case the task @p isn't fully descheduled from its runqueue;
1544 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
1545 * since all we need to do is flip p->state to TASK_RUNNING, as
1546 * the task is still ->on_rq.
1547 */
1548static int ttwu_remote(struct task_struct *p, int wake_flags)
1549{
1550	struct rq *rq;
1551	int ret = 0;
1552
1553	rq = __task_rq_lock(p);
1554	if (task_on_rq_queued(p)) {
1555		/* check_preempt_curr() may use rq clock */
1556		update_rq_clock(rq);
1557		ttwu_do_wakeup(rq, p, wake_flags);
1558		ret = 1;
1559	}
1560	__task_rq_unlock(rq);
1561
1562	return ret;
1563}
1564
1565#ifdef CONFIG_SMP
1566void sched_ttwu_pending(void)
1567{
1568	struct rq *rq = this_rq();
1569	struct llist_node *llist = llist_del_all(&rq->wake_list);
1570	struct task_struct *p;
1571	unsigned long flags;
1572
1573	if (!llist)
1574		return;
1575
1576	raw_spin_lock_irqsave(&rq->lock, flags);
1577
1578	while (llist) {
1579		p = llist_entry(llist, struct task_struct, wake_entry);
1580		llist = llist_next(llist);
1581		ttwu_do_activate(rq, p, 0);
1582	}
1583
1584	raw_spin_unlock_irqrestore(&rq->lock, flags);
1585}
1586
1587void scheduler_ipi(void)
1588{
1589	/*
1590	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
1591	 * TIF_NEED_RESCHED remotely (for the first time) will also send
1592	 * this IPI.
1593	 */
1594	preempt_fold_need_resched();
1595
1596	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
1597		return;
1598
1599	/*
1600	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
1601	 * traditionally all their work was done from the interrupt return
1602	 * path. Now that we actually do some work, we need to make sure
1603	 * we do call them.
1604	 *
1605	 * Some archs already do call them, luckily irq_enter/exit nest
1606	 * properly.
1607	 *
1608	 * Arguably we should visit all archs and update all handlers,
1609	 * however a fair share of IPIs are still resched only so this would
1610	 * somewhat pessimize the simple resched case.
1611	 */
1612	irq_enter();
1613	sched_ttwu_pending();
1614
1615	/*
1616	 * Check if someone kicked us for doing the nohz idle load balance.
1617	 */
1618	if (unlikely(got_nohz_idle_kick())) {
1619		this_rq()->idle_balance = 1;
1620		raise_softirq_irqoff(SCHED_SOFTIRQ);
1621	}
1622	irq_exit();
1623}
1624
1625static void ttwu_queue_remote(struct task_struct *p, int cpu)
1626{
1627	struct rq *rq = cpu_rq(cpu);
1628
1629	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
1630		if (!set_nr_if_polling(rq->idle))
1631			smp_send_reschedule(cpu);
1632		else
1633			trace_sched_wake_idle_without_ipi(cpu);
1634	}
1635}
1636
1637void wake_up_if_idle(int cpu)
1638{
1639	struct rq *rq = cpu_rq(cpu);
1640	unsigned long flags;
1641
1642	if (!is_idle_task(rq->curr))
1643		return;
1644
1645	if (set_nr_if_polling(rq->idle)) {
1646		trace_sched_wake_idle_without_ipi(cpu);
1647	} else {
1648		raw_spin_lock_irqsave(&rq->lock, flags);
1649		if (is_idle_task(rq->curr))
1650			smp_send_reschedule(cpu);
1651		/* Else cpu is not in idle, do nothing here */
1652		raw_spin_unlock_irqrestore(&rq->lock, flags);
1653	}
1654}
1655
1656bool cpus_share_cache(int this_cpu, int that_cpu)
1657{
1658	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
1659}
1660#endif /* CONFIG_SMP */
1661
1662static void ttwu_queue(struct task_struct *p, int cpu)
1663{
1664	struct rq *rq = cpu_rq(cpu);
1665
1666#if defined(CONFIG_SMP)
1667	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
1668		sched_clock_cpu(cpu); /* sync clocks x-cpu */
1669		ttwu_queue_remote(p, cpu);
1670		return;
1671	}
1672#endif
1673
1674	raw_spin_lock(&rq->lock);
1675	ttwu_do_activate(rq, p, 0);
1676	raw_spin_unlock(&rq->lock);
1677}
1678
1679/**
1680 * try_to_wake_up - wake up a thread
1681 * @p: the thread to be awakened
1682 * @state: the mask of task states that can be woken
1683 * @wake_flags: wake modifier flags (WF_*)
1684 *
1685 * Put it on the run-queue if it's not already there. The "current"
1686 * thread is always on the run-queue (except when the actual
1687 * re-schedule is in progress), and as such you're allowed to do
1688 * the simpler "current->state = TASK_RUNNING" to mark yourself
1689 * runnable without the overhead of this.
1690 *
1691 * Return: %true if @p was woken up, %false if it was already running
1692 * or @state didn't match @p's state.
1693 */
1694static int
1695try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1696{
1697	unsigned long flags;
1698	int cpu, success = 0;
1699
1700	/*
1701	 * If we are going to wake up a thread waiting for CONDITION we
1702	 * need to ensure that CONDITION=1 done by the caller cannot be
1703	 * reordered with the p->state check below. This pairs with the mb() in
1704	 * set_current_state() that the waiting thread does.
1705	 */
1706	smp_mb__before_spinlock();
1707	raw_spin_lock_irqsave(&p->pi_lock, flags);
1708	if (!(p->state & state))
1709		goto out;
1710
1711	success = 1; /* we're going to change ->state */
1712	cpu = task_cpu(p);
1713
1714	if (p->on_rq && ttwu_remote(p, wake_flags))
1715		goto stat;
1716
1717#ifdef CONFIG_SMP
1718	/*
1719	 * If the owning (remote) cpu is still in the middle of schedule() with
1720	 * this task as prev, wait until it's done referencing the task.
1721	 */
1722	while (p->on_cpu)
1723		cpu_relax();
1724	/*
1725	 * Pairs with the smp_wmb() in finish_lock_switch().
1726	 */
1727	smp_rmb();
1728
1729	p->sched_contributes_to_load = !!task_contributes_to_load(p);
1730	p->state = TASK_WAKING;
1731
1732	if (p->sched_class->task_waking)
1733		p->sched_class->task_waking(p);
1734
1735	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
1736	if (task_cpu(p) != cpu) {
1737		wake_flags |= WF_MIGRATED;
1738		set_task_cpu(p, cpu);
1739	}
1740#endif /* CONFIG_SMP */
1741
1742	ttwu_queue(p, cpu);
1743stat:
1744	ttwu_stat(p, cpu, wake_flags);
1745out:
1746	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1747
1748	return success;
1749}
1750
1751/**
1752 * try_to_wake_up_local - try to wake up a local task with rq lock held
1753 * @p: the thread to be awakened
1754 *
1755 * Put @p on the run-queue if it's not already there. The caller must
1756 * ensure that this_rq() is locked, @p is bound to this_rq() and not
1757 * the current task.
1758 */
1759static void try_to_wake_up_local(struct task_struct *p)
1760{
1761	struct rq *rq = task_rq(p);
1762
1763	if (WARN_ON_ONCE(rq != this_rq()) ||
1764	    WARN_ON_ONCE(p == current))
1765		return;
1766
1767	lockdep_assert_held(&rq->lock);
1768
1769	if (!raw_spin_trylock(&p->pi_lock)) {
1770		raw_spin_unlock(&rq->lock);
1771		raw_spin_lock(&p->pi_lock);
1772		raw_spin_lock(&rq->lock);
1773	}
1774
1775	if (!(p->state & TASK_NORMAL))
1776		goto out;
1777
1778	if (!task_on_rq_queued(p))
1779		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
1780
1781	ttwu_do_wakeup(rq, p, 0);
1782	ttwu_stat(p, smp_processor_id(), 0);
1783out:
1784	raw_spin_unlock(&p->pi_lock);
1785}
1786
1787/**
1788 * wake_up_process - Wake up a specific process
1789 * @p: The process to be woken up.
1790 *
1791 * Attempt to wake up the nominated process and move it to the set of runnable
1792 * processes.
1793 *
1794 * Return: 1 if the process was woken up, 0 if it was already running.
1795 *
1796 * It may be assumed that this function implies a write memory barrier before
1797 * changing the task state if and only if any tasks are woken up.
1798 */
1799int wake_up_process(struct task_struct *p)
1800{
1801	WARN_ON(task_is_stopped_or_traced(p));
1802	return try_to_wake_up(p, TASK_NORMAL, 0);
1803}
1804EXPORT_SYMBOL(wake_up_process);
1805
1806int wake_up_state(struct task_struct *p, unsigned int state)
1807{
1808	return try_to_wake_up(p, state, 0);
1809}
1810
1811/*
1812 * Perform scheduler related setup for a newly forked process p.
1813 * p is forked by current.
1814 *
1815 * __sched_fork() is basic setup used by init_idle() too:
1816 */
1817static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
1818{
1819	p->on_rq			= 0;
1820
1821	p->se.on_rq			= 0;
1822	p->se.exec_start		= 0;
1823	p->se.sum_exec_runtime		= 0;
1824	p->se.prev_sum_exec_runtime	= 0;
1825	p->se.nr_migrations		= 0;
1826	p->se.vruntime			= 0;
1827	INIT_LIST_HEAD(&p->se.group_node);
1828
1829#ifdef CONFIG_SCHEDSTATS
1830	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
1831#endif
1832
1833	RB_CLEAR_NODE(&p->dl.rb_node);
1834	hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1835	p->dl.dl_runtime = p->dl.runtime = 0;
1836	p->dl.dl_deadline = p->dl.deadline = 0;
1837	p->dl.dl_period = 0;
1838	p->dl.flags = 0;
1839
1840	INIT_LIST_HEAD(&p->rt.run_list);
1841
1842#ifdef CONFIG_PREEMPT_NOTIFIERS
1843	INIT_HLIST_HEAD(&p->preempt_notifiers);
1844#endif
1845
1846#ifdef CONFIG_NUMA_BALANCING
1847	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
1848		p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
1849		p->mm->numa_scan_seq = 0;
1850	}
1851
1852	if (clone_flags & CLONE_VM)
1853		p->numa_preferred_nid = current->numa_preferred_nid;
1854	else
1855		p->numa_preferred_nid = -1;
1856
1857	p->node_stamp = 0ULL;
1858	p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
1859	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
1860	p->numa_work.next = &p->numa_work;
1861	p->numa_faults_memory = NULL;
1862	p->numa_faults_buffer_memory = NULL;
1863	p->last_task_numa_placement = 0;
1864	p->last_sum_exec_runtime = 0;
1865
1866	INIT_LIST_HEAD(&p->numa_entry);
1867	p->numa_group = NULL;
1868#endif /* CONFIG_NUMA_BALANCING */
1869}
1870
1871#ifdef CONFIG_NUMA_BALANCING
1872#ifdef CONFIG_SCHED_DEBUG
1873void set_numabalancing_state(bool enabled)
1874{
1875	if (enabled)
1876		sched_feat_set("NUMA");
1877	else
1878		sched_feat_set("NO_NUMA");
1879}
1880#else
1881__read_mostly bool numabalancing_enabled;
1882
1883void set_numabalancing_state(bool enabled)
1884{
1885	numabalancing_enabled = enabled;
1886}
1887#endif /* CONFIG_SCHED_DEBUG */
1888
1889#ifdef CONFIG_PROC_SYSCTL
1890int sysctl_numa_balancing(struct ctl_table *table, int write,
1891			 void __user *buffer, size_t *lenp, loff_t *ppos)
1892{
1893	struct ctl_table t;
1894	int err;
1895	int state = numabalancing_enabled;
1896
1897	if (write && !capable(CAP_SYS_ADMIN))
1898		return -EPERM;
1899
1900	t = *table;
1901	t.data = &state;
1902	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
1903	if (err < 0)
1904		return err;
1905	if (write)
1906		set_numabalancing_state(state);
1907	return err;
1908}
1909#endif
1910#endif
1911
1912/*
1913 * fork()/clone()-time setup:
1914 */
1915int sched_fork(unsigned long clone_flags, struct task_struct *p)
1916{
1917	unsigned long flags;
1918	int cpu = get_cpu();
1919
1920	__sched_fork(clone_flags, p);
1921	/*
1922	 * We mark the process as running here. This guarantees that
1923	 * nobody will actually run it, and a signal or other external
1924	 * event cannot wake it up and insert it on the runqueue either.
1925	 */
1926	p->state = TASK_RUNNING;
1927
1928	/*
1929	 * Make sure we do not leak PI boosting priority to the child.
1930	 */
1931	p->prio = current->normal_prio;
1932
1933	/*
1934	 * Revert to default priority/policy on fork if requested.
1935	 */
1936	if (unlikely(p->sched_reset_on_fork)) {
1937		if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
1938			p->policy = SCHED_NORMAL;
1939			p->static_prio = NICE_TO_PRIO(0);
1940			p->rt_priority = 0;
1941		} else if (PRIO_TO_NICE(p->static_prio) < 0)
1942			p->static_prio = NICE_TO_PRIO(0);
1943
1944		p->prio = p->normal_prio = __normal_prio(p);
1945		set_load_weight(p);
1946
1947		/*
1948		 * We don't need the reset flag anymore after the fork. It has
1949		 * fulfilled its duty:
1950		 */
1951		p->sched_reset_on_fork = 0;
1952	}
1953
1954	if (dl_prio(p->prio)) {
1955		put_cpu();
1956		return -EAGAIN;
1957	} else if (rt_prio(p->prio)) {
1958		p->sched_class = &rt_sched_class;
1959	} else {
1960		p->sched_class = &fair_sched_class;
1961	}
1962
1963	if (p->sched_class->task_fork)
1964		p->sched_class->task_fork(p);
1965
1966	/*
1967	 * The child is not yet in the pid-hash so no cgroup attach races,
1968	 * and the cgroup is pinned to this child because cgroup_fork()
1969	 * is run before sched_fork().
1970	 *
1971	 * Silence PROVE_RCU.
1972	 */
1973	raw_spin_lock_irqsave(&p->pi_lock, flags);
1974	set_task_cpu(p, cpu);
1975	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1976
1977#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1978	if (likely(sched_info_on()))
1979		memset(&p->sched_info, 0, sizeof(p->sched_info));
1980#endif
1981#if defined(CONFIG_SMP)
1982	p->on_cpu = 0;
1983#endif
1984	init_task_preempt_count(p);
1985#ifdef CONFIG_SMP
1986	plist_node_init(&p->pushable_tasks, MAX_PRIO);
1987	RB_CLEAR_NODE(&p->pushable_dl_tasks);
1988#endif
1989
1990	put_cpu();
1991	return 0;
1992}
1993
1994unsigned long to_ratio(u64 period, u64 runtime)
1995{
1996	if (runtime == RUNTIME_INF)
1997		return 1ULL << 20;
1998
1999	/*
2000	 * Doing this here saves a lot of checks in all
2001	 * the calling paths, and returning zero seems
2002	 * safe for them anyway.
2003	 */
2004	if (period == 0)
2005		return 0;
2006
2007	return div64_u64(runtime << 20, period);
2008}
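/*
 * The ratio is returned in fixed point, with 1 << 20 representing 100%.
 * E.g. with the RT defaults above (runtime 950000us out of a 1000000us
 * period), to_ratio() returns 996147, i.e. roughly 0.95 * 2^20.
 */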
2009
2010#ifdef CONFIG_SMP
2011inline struct dl_bw *dl_bw_of(int i)
2012{
2013	return &cpu_rq(i)->rd->dl_bw;
2014}
2015
2016static inline int dl_bw_cpus(int i)
2017{
2018	struct root_domain *rd = cpu_rq(i)->rd;
2019	int cpus = 0;
2020
2021	for_each_cpu_and(i, rd->span, cpu_active_mask)
2022		cpus++;
2023
2024	return cpus;
2025}
2026#else
2027inline struct dl_bw *dl_bw_of(int i)
2028{
2029	return &cpu_rq(i)->dl.dl_bw;
2030}
2031
2032static inline int dl_bw_cpus(int i)
2033{
2034	return 1;
2035}
2036#endif
2037
2038static inline
2039void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
2040{
2041	dl_b->total_bw -= tsk_bw;
2042}
2043
2044static inline
2045void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
2046{
2047	dl_b->total_bw += tsk_bw;
2048}
2049
2050static inline
2051bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
2052{
2053	return dl_b->bw != -1 &&
2054	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
2055}
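/*
 * __dl_overflow() is the admission test: a change from old_bw to new_bw
 * overflows when total_bw - old_bw + new_bw would exceed bw * cpus,
 * i.e. when the deadline utilization of the root domain would go above
 * the configured limit. A bw of -1 means "no limit" and always admits.
 */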
2056
2057/*
2058 * We must be sure that accepting a new task (or allowing changing the
2059 * parameters of an existing one) is consistent with the bandwidth
2060 * constraints. If yes, this function also accordingly updates the currently
2061 * allocated bandwidth to reflect the new situation.
2062 *
2063 * This function is called while holding p's rq->lock.
2064 */
2065static int dl_overflow(struct task_struct *p, int policy,
2066		       const struct sched_attr *attr)
2067{
2068
2069	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2070	u64 period = attr->sched_period ?: attr->sched_deadline;
2071	u64 runtime = attr->sched_runtime;
2072	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2073	int cpus, err = -1;
2074
2075	if (new_bw == p->dl.dl_bw)
2076		return 0;
2077
2078	/*
2079	 * Whether a task enters, leaves, or stays -deadline but changes
2080	 * its parameters, we may need to update the total allocated
2081	 * bandwidth of the container accordingly.
2082	 */
2083	raw_spin_lock(&dl_b->lock);
2084	cpus = dl_bw_cpus(task_cpu(p));
2085	if (dl_policy(policy) && !task_has_dl_policy(p) &&
2086	    !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2087		__dl_add(dl_b, new_bw);
2088		err = 0;
2089	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
2090		   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2091		__dl_clear(dl_b, p->dl.dl_bw);
2092		__dl_add(dl_b, new_bw);
2093		err = 0;
2094	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2095		__dl_clear(dl_b, p->dl.dl_bw);
2096		err = 0;
2097	}
2098	raw_spin_unlock(&dl_b->lock);
2099
2100	return err;
2101}
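
/*
 * Editor's sketch of an admission decision (hypothetical numbers): a task
 * requesting runtime 10ms every period 100ms asks for a new_bw of roughly
 * 0.1 * (1 << 20). If adding that to total_bw stays within bw * cpus, one
 * of the branches above grants the request; otherwise err stays -1 and
 * __sched_setscheduler() turns it into -EBUSY.
 */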
2102
2103extern void init_dl_bw(struct dl_bw *dl_b);
2104
2105/*
2106 * wake_up_new_task - wake up a newly created task for the first time.
2107 *
2108 * This function will do some initial scheduler statistics housekeeping
2109 * that must be done for every newly created context, then puts the task
2110 * on the runqueue and wakes it.
2111 */
2112void wake_up_new_task(struct task_struct *p)
2113{
2114	unsigned long flags;
2115	struct rq *rq;
2116
2117	raw_spin_lock_irqsave(&p->pi_lock, flags);
2118#ifdef CONFIG_SMP
2119	/*
2120	 * Fork balancing, do it here and not earlier because:
2121	 *  - cpus_allowed can change in the fork path
2122	 *  - any previously selected cpu might disappear through hotplug
2123	 */
2124	set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
2125#endif
2126
2127	/* Initialize new task's runnable average */
2128	init_task_runnable_average(p);
2129	rq = __task_rq_lock(p);
2130	activate_task(rq, p, 0);
2131	p->on_rq = TASK_ON_RQ_QUEUED;
2132	trace_sched_wakeup_new(p, true);
2133	check_preempt_curr(rq, p, WF_FORK);
2134#ifdef CONFIG_SMP
2135	if (p->sched_class->task_woken)
2136		p->sched_class->task_woken(rq, p);
2137#endif
2138	task_rq_unlock(rq, p, &flags);
2139}
2140
2141#ifdef CONFIG_PREEMPT_NOTIFIERS
2142
2143/**
2144 * preempt_notifier_register - tell me when current is being preempted & rescheduled
2145 * @notifier: notifier struct to register
2146 */
2147void preempt_notifier_register(struct preempt_notifier *notifier)
2148{
2149	hlist_add_head(&notifier->link, &current->preempt_notifiers);
2150}
2151EXPORT_SYMBOL_GPL(preempt_notifier_register);
2152
2153/**
2154 * preempt_notifier_unregister - no longer interested in preemption notifications
2155 * @notifier: notifier struct to unregister
2156 *
2157 * This is safe to call from within a preemption notifier.
2158 */
2159void preempt_notifier_unregister(struct preempt_notifier *notifier)
2160{
2161	hlist_del(&notifier->link);
2162}
2163EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2164
2165static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2166{
2167	struct preempt_notifier *notifier;
2168
2169	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
2170		notifier->ops->sched_in(notifier, raw_smp_processor_id());
2171}
2172
2173static void
2174fire_sched_out_preempt_notifiers(struct task_struct *curr,
2175				 struct task_struct *next)
2176{
2177	struct preempt_notifier *notifier;
2178
2179	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
2180		notifier->ops->sched_out(notifier, next);
2181}
2182
2183#else /* !CONFIG_PREEMPT_NOTIFIERS */
2184
2185static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2186{
2187}
2188
2189static void
2190fire_sched_out_preempt_notifiers(struct task_struct *curr,
2191				 struct task_struct *next)
2192{
2193}
2194
2195#endif /* CONFIG_PREEMPT_NOTIFIERS */
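
/*
 * Editor's sketch of typical usage of the API above (modelled on how KVM
 * consumes preempt notifiers; the my_* names and struct my_context are
 * illustrative only, not part of this file):
 */
#if 0	/* illustration only, not compiled */
#include <linux/preempt.h>

struct my_context {
	struct preempt_notifier pn;
	/* per-context state that must be saved/restored across preemption */
};

static void my_sched_in(struct preempt_notifier *pn, int cpu)
{
	/* current was just scheduled back in on @cpu: reload state */
}

static void my_sched_out(struct preempt_notifier *pn,
			 struct task_struct *next)
{
	/* current is about to be preempted in favour of @next: save state */
}

static struct preempt_ops my_preempt_ops = {
	.sched_in	= my_sched_in,
	.sched_out	= my_sched_out,
};

static void my_context_load(struct my_context *ctx)
{
	preempt_notifier_init(&ctx->pn, &my_preempt_ops);
	preempt_notifier_register(&ctx->pn);	/* fires for current only */
}
#endif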
2196
2197/**
2198 * prepare_task_switch - prepare to switch tasks
2199 * @rq: the runqueue preparing to switch
2200 * @prev: the current task that is being switched out
2201 * @next: the task we are going to switch to.
2202 *
2203 * This is called with the rq lock held and interrupts off. It must
2204 * be paired with a subsequent finish_task_switch after the context
2205 * switch.
2206 *
2207 * prepare_task_switch sets up locking and calls architecture specific
2208 * hooks.
2209 */
2210static inline void
2211prepare_task_switch(struct rq *rq, struct task_struct *prev,
2212		    struct task_struct *next)
2213{
2214	trace_sched_switch(prev, next);
2215	sched_info_switch(rq, prev, next);
2216	perf_event_task_sched_out(prev, next);
2217	fire_sched_out_preempt_notifiers(prev, next);
2218	prepare_lock_switch(rq, next);
2219	prepare_arch_switch(next);
2220}
2221
2222/**
2223 * finish_task_switch - clean up after a task-switch
2224 * @rq: runqueue associated with task-switch
2225 * @prev: the thread we just switched away from.
2226 *
2227 * finish_task_switch must be called after the context switch, paired
2228 * with a prepare_task_switch call before the context switch.
2229 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2230 * and do any other architecture-specific cleanup actions.
2231 *
2232 * Note that we may have delayed dropping an mm in context_switch(). If
2233 * so, we finish that here outside of the runqueue lock. (Doing it
2234 * with the lock held can cause deadlocks; see schedule() for
2235 * details.)
2236 */
2237static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2238	__releases(rq->lock)
2239{
2240	struct mm_struct *mm = rq->prev_mm;
2241	long prev_state;
2242
2243	rq->prev_mm = NULL;
2244
2245	/*
2246	 * A task struct has one reference for its use as "current".
2247	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
2248	 * schedule one last time. The schedule call will never return, and
2249	 * the scheduled task must drop that reference.
2250	 * The test for TASK_DEAD must occur while the runqueue locks are
2251	 * still held, otherwise prev could be scheduled on another cpu, die
2252	 * there before we look at prev->state, and then the reference would
2253	 * be dropped twice.
2254	 *		Manfred Spraul <manfred@colorfullife.com>
2255	 */
2256	prev_state = prev->state;
2257	vtime_task_switch(prev);
2258	finish_arch_switch(prev);
2259	perf_event_task_sched_in(prev, current);
2260	finish_lock_switch(rq, prev);
2261	finish_arch_post_lock_switch();
2262
2263	fire_sched_in_preempt_notifiers(current);
2264	if (mm)
2265		mmdrop(mm);
2266	if (unlikely(prev_state == TASK_DEAD)) {
2267		if (prev->sched_class->task_dead)
2268			prev->sched_class->task_dead(prev);
2269
2270		/*
2271		 * Remove function-return probe instances associated with this
2272		 * task and put them back on the free list.
2273		 */
2274		kprobe_flush_task(prev);
2275		put_task_struct(prev);
2276	}
2277
2278	tick_nohz_task_switch(current);
2279}
2280
2281#ifdef CONFIG_SMP
2282
2283/* rq->lock is NOT held, but preemption is disabled */
2284static inline void post_schedule(struct rq *rq)
2285{
2286	if (rq->post_schedule) {
2287		unsigned long flags;
2288
2289		raw_spin_lock_irqsave(&rq->lock, flags);
2290		if (rq->curr->sched_class->post_schedule)
2291			rq->curr->sched_class->post_schedule(rq);
2292		raw_spin_unlock_irqrestore(&rq->lock, flags);
2293
2294		rq->post_schedule = 0;
2295	}
2296}
2297
2298#else
2299
2300static inline void post_schedule(struct rq *rq)
2301{
2302}
2303
2304#endif
2305
2306/**
2307 * schedule_tail - first thing a freshly forked thread must call.
2308 * @prev: the thread we just switched away from.
2309 */
2310asmlinkage __visible void schedule_tail(struct task_struct *prev)
2311	__releases(rq->lock)
2312{
2313	struct rq *rq = this_rq();
2314
2315	finish_task_switch(rq, prev);
2316
2317	/*
2318	 * FIXME: do we need to worry about rq being invalidated by the
2319	 * task_switch?
2320	 */
2321	post_schedule(rq);
2322
2323#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2324	/* In this case, finish_task_switch does not reenable preemption */
2325	preempt_enable();
2326#endif
2327	if (current->set_child_tid)
2328		put_user(task_pid_vnr(current), current->set_child_tid);
2329}
2330
2331/*
2332 * context_switch - switch to the new MM and the new
2333 * thread's register state.
2334 */
2335static inline void
2336context_switch(struct rq *rq, struct task_struct *prev,
2337	       struct task_struct *next)
2338{
2339	struct mm_struct *mm, *oldmm;
2340
2341	prepare_task_switch(rq, prev, next);
2342
2343	mm = next->mm;
2344	oldmm = prev->active_mm;
2345	/*
2346	 * For paravirt, this is coupled with an exit in switch_to to
2347	 * combine the page table reload and the switch backend into
2348	 * one hypercall.
2349	 */
2350	arch_start_context_switch(prev);
2351
2352	if (!mm) {
2353		next->active_mm = oldmm;
2354		atomic_inc(&oldmm->mm_count);
2355		enter_lazy_tlb(oldmm, next);
2356	} else
2357		switch_mm(oldmm, mm, next);
2358
2359	if (!prev->mm) {
2360		prev->active_mm = NULL;
2361		rq->prev_mm = oldmm;
2362	}
2363	/*
2364	 * The runqueue lock will be released by the next
2365	 * task (which is an invalid locking op, but in the case
2366	 * of the scheduler it's an obvious special-case), so we
2367	 * do an early lockdep release here:
2368	 */
2369#ifndef __ARCH_WANT_UNLOCKED_CTXSW
2370	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2371#endif
2372
2373	context_tracking_task_switch(prev, next);
2374	/* Here we just switch the register state and the stack. */
2375	switch_to(prev, next, prev);
2376
2377	barrier();
2378	/*
2379	 * this_rq must be evaluated again because prev may have moved
2380	 * CPUs since it called schedule(), thus the 'rq' on its stack
2381	 * frame will be invalid.
2382	 */
2383	finish_task_switch(this_rq(), prev);
2384}
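
/*
 * Editor's note on the lazy-TLB path above: when the incoming task is a
 * kernel thread (next->mm == NULL) no user page tables are switched; the
 * kernel thread borrows prev's active_mm, and enter_lazy_tlb() tells the
 * architecture it may defer TLB flushes for that mm until a real
 * switch_mm() happens. The borrowed mm reference taken via mm_count is
 * dropped after switching away, in finish_task_switch() via mmdrop().
 */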
2385
2386/*
2387 * nr_running and nr_context_switches:
2388 *
2389 * externally visible scheduler statistics: current number of runnable
2390 * threads, total number of context switches performed since bootup.
2391 */
2392unsigned long nr_running(void)
2393{
2394	unsigned long i, sum = 0;
2395
2396	for_each_online_cpu(i)
2397		sum += cpu_rq(i)->nr_running;
2398
2399	return sum;
2400}
2401
2402unsigned long long nr_context_switches(void)
2403{
2404	int i;
2405	unsigned long long sum = 0;
2406
2407	for_each_possible_cpu(i)
2408		sum += cpu_rq(i)->nr_switches;
2409
2410	return sum;
2411}
2412
2413unsigned long nr_iowait(void)
2414{
2415	unsigned long i, sum = 0;
2416
2417	for_each_possible_cpu(i)
2418		sum += atomic_read(&cpu_rq(i)->nr_iowait);
2419
2420	return sum;
2421}
2422
2423unsigned long nr_iowait_cpu(int cpu)
2424{
2425	struct rq *this = cpu_rq(cpu);
2426	return atomic_read(&this->nr_iowait);
2427}
2428
2429void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
2430{
2431	struct rq *this = this_rq();
2432	*nr_waiters = atomic_read(&this->nr_iowait);
2433	*load = this->cpu_load[0];
2434}
2435
2436#ifdef CONFIG_SMP
2437
2438/*
2439 * sched_exec - execve() is a valuable balancing opportunity, because at
2440 * this point the task has the smallest effective memory and cache footprint.
2441 */
2442void sched_exec(void)
2443{
2444	struct task_struct *p = current;
2445	unsigned long flags;
2446	int dest_cpu;
2447
2448	raw_spin_lock_irqsave(&p->pi_lock, flags);
2449	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
2450	if (dest_cpu == smp_processor_id())
2451		goto unlock;
2452
2453	if (likely(cpu_active(dest_cpu))) {
2454		struct migration_arg arg = { p, dest_cpu };
2455
2456		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2457		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
2458		return;
2459	}
2460unlock:
2461	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2462}
2463
2464#endif
2465
2466DEFINE_PER_CPU(struct kernel_stat, kstat);
2467DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
2468
2469EXPORT_PER_CPU_SYMBOL(kstat);
2470EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
2471
2472/*
2473 * Return any ns on the sched_clock that have not yet been accounted in
2474 * @p in case that task is currently running.
2475 *
2476 * Called with task_rq_lock() held on @rq.
2477 */
2478static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
2479{
2480	u64 ns = 0;
2481
2482	/*
2483	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
2484	 * project cycles that may never be accounted to this
2485	 * thread, breaking clock_gettime().
2486	 */
2487	if (task_current(rq, p) && task_on_rq_queued(p)) {
2488		update_rq_clock(rq);
2489		ns = rq_clock_task(rq) - p->se.exec_start;
2490		if ((s64)ns < 0)
2491			ns = 0;
2492	}
2493
2494	return ns;
2495}
2496
2497unsigned long long task_delta_exec(struct task_struct *p)
2498{
2499	unsigned long flags;
2500	struct rq *rq;
2501	u64 ns = 0;
2502
2503	rq = task_rq_lock(p, &flags);
2504	ns = do_task_delta_exec(p, rq);
2505	task_rq_unlock(rq, p, &flags);
2506
2507	return ns;
2508}
2509
2510/*
2511 * Return accounted runtime for the task.
2512 * In case the task is currently running, return the runtime plus current's
2513 * pending runtime that has not been accounted yet.
2514 */
2515unsigned long long task_sched_runtime(struct task_struct *p)
2516{
2517	unsigned long flags;
2518	struct rq *rq;
2519	u64 ns = 0;
2520
2521#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
2522	/*
2523	 * 64-bit architectures don't need locks to atomically read a 64-bit value,
2524	 * so we have an optimization opportunity when the task's delta_exec is 0.
2525	 * Reading ->on_cpu is racy, but this is ok.
2526	 *
2527	 * If we race with it leaving cpu, we'll take a lock. So we're correct.
2528	 * If we race with it entering cpu, unaccounted time is 0. This is
2529	 * indistinguishable from the read occurring a few cycles earlier.
2530	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
2531	 * been accounted, so we're correct here as well.
2532	 */
2533	if (!p->on_cpu || !task_on_rq_queued(p))
2534		return p->se.sum_exec_runtime;
2535#endif
2536
2537	rq = task_rq_lock(p, &flags);
2538	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
2539	task_rq_unlock(rq, p, &flags);
2540
2541	return ns;
2542}
2543
2544/*
2545 * This function gets called by the timer code, with HZ frequency.
2546 * We call it with interrupts disabled.
2547 */
2548void scheduler_tick(void)
2549{
2550	int cpu = smp_processor_id();
2551	struct rq *rq = cpu_rq(cpu);
2552	struct task_struct *curr = rq->curr;
2553
2554	sched_clock_tick();
2555
2556	raw_spin_lock(&rq->lock);
2557	update_rq_clock(rq);
2558	curr->sched_class->task_tick(rq, curr, 0);
2559	update_cpu_load_active(rq);
2560	raw_spin_unlock(&rq->lock);
2561
2562	perf_event_task_tick();
2563
2564#ifdef CONFIG_SMP
2565	rq->idle_balance = idle_cpu(cpu);
2566	trigger_load_balance(rq);
2567#endif
2568	rq_last_tick_reset(rq);
2569}
2570
2571#ifdef CONFIG_NO_HZ_FULL
2572/**
2573 * scheduler_tick_max_deferment
2574 *
2575 * Keep at least one tick per second when a single
2576 * active task is running because the scheduler doesn't
2577 * yet completely support a full dynticks environment.
2578 *
2579 * This makes sure that uptime, CFS vruntime, load
2580 * balancing, etc... continue to move forward, even
2581 * with a very low granularity.
2582 *
2583 * Return: Maximum deferment in nanoseconds.
2584 */
2585u64 scheduler_tick_max_deferment(void)
2586{
2587	struct rq *rq = this_rq();
2588	unsigned long next, now = ACCESS_ONCE(jiffies);
2589
2590	next = rq->last_sched_tick + HZ;
2591
2592	if (time_before_eq(next, now))
2593		return 0;
2594
2595	return jiffies_to_nsecs(next - now);
2596}
2597#endif
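
/*
 * Editor's note -- worked example (assuming HZ == 1000): if the last tick
 * on this runqueue was 400 jiffies ago, next == last_sched_tick + 1000 is
 * still 600 jiffies away, so the tick may be deferred by at most
 * jiffies_to_nsecs(600), i.e. 600ms. Once a full second has elapsed the
 * function returns 0 and the tick must run immediately.
 */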
2598
2599notrace unsigned long get_parent_ip(unsigned long addr)
2600{
2601	if (in_lock_functions(addr)) {
2602		addr = CALLER_ADDR2;
2603		if (in_lock_functions(addr))
2604			addr = CALLER_ADDR3;
2605	}
2606	return addr;
2607}
2608
2609#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2610				defined(CONFIG_PREEMPT_TRACER))
2611
2612void preempt_count_add(int val)
2613{
2614#ifdef CONFIG_DEBUG_PREEMPT
2615	/*
2616	 * Underflow?
2617	 */
2618	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
2619		return;
2620#endif
2621	__preempt_count_add(val);
2622#ifdef CONFIG_DEBUG_PREEMPT
2623	/*
2624	 * Spinlock count overflowing soon?
2625	 */
2626	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
2627				PREEMPT_MASK - 10);
2628#endif
2629	if (preempt_count() == val) {
2630		unsigned long ip = get_parent_ip(CALLER_ADDR1);
2631#ifdef CONFIG_DEBUG_PREEMPT
2632		current->preempt_disable_ip = ip;
2633#endif
2634		trace_preempt_off(CALLER_ADDR0, ip);
2635	}
2636}
2637EXPORT_SYMBOL(preempt_count_add);
2638NOKPROBE_SYMBOL(preempt_count_add);
2639
2640void preempt_count_sub(int val)
2641{
2642#ifdef CONFIG_DEBUG_PREEMPT
2643	/*
2644	 * Underflow?
2645	 */
2646	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
2647		return;
2648	/*
2649	 * Is the spinlock portion underflowing?
2650	 */
2651	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
2652			!(preempt_count() & PREEMPT_MASK)))
2653		return;
2654#endif
2655
2656	if (preempt_count() == val)
2657		trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
2658	__preempt_count_sub(val);
2659}
2660EXPORT_SYMBOL(preempt_count_sub);
2661NOKPROBE_SYMBOL(preempt_count_sub);
2662
2663#endif
2664
2665/*
2666 * Print scheduling while atomic bug:
2667 */
2668static noinline void __schedule_bug(struct task_struct *prev)
2669{
2670	if (oops_in_progress)
2671		return;
2672
2673	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
2674		prev->comm, prev->pid, preempt_count());
2675
2676	debug_show_held_locks(prev);
2677	print_modules();
2678	if (irqs_disabled())
2679		print_irqtrace_events(prev);
2680#ifdef CONFIG_DEBUG_PREEMPT
2681	if (in_atomic_preempt_off()) {
2682		pr_err("Preemption disabled at:");
2683		print_ip_sym(current->preempt_disable_ip);
2684		pr_cont("\n");
2685	}
2686#endif
2687	dump_stack();
2688	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
2689}
2690
2691/*
2692 * Various schedule()-time debugging checks and statistics:
2693 */
2694static inline void schedule_debug(struct task_struct *prev)
2695{
2696	/*
2697	 * Test if we are atomic. Since do_exit() needs to call into
2698	 * schedule() atomically, we ignore that path. Otherwise whine
2699	 * if we are scheduling when we should not.
2700	 */
2701	if (unlikely(in_atomic_preempt_off() && prev->state != TASK_DEAD))
2702		__schedule_bug(prev);
2703	rcu_sleep_check();
2704
2705	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
2706
2707	schedstat_inc(this_rq(), sched_count);
2708}
2709
2710/*
2711 * Pick up the highest-prio task:
2712 */
2713static inline struct task_struct *
2714pick_next_task(struct rq *rq, struct task_struct *prev)
2715{
2716	const struct sched_class *class = &fair_sched_class;
2717	struct task_struct *p;
2718
2719	/*
2720	 * Optimization: we know that if all tasks are in
2721	 * the fair class we can call that function directly:
2722	 */
2723	if (likely(prev->sched_class == class &&
2724		   rq->nr_running == rq->cfs.h_nr_running)) {
2725		p = fair_sched_class.pick_next_task(rq, prev);
2726		if (unlikely(p == RETRY_TASK))
2727			goto again;
2728
2729		/* assumes fair_sched_class->next == idle_sched_class */
2730		if (unlikely(!p))
2731			p = idle_sched_class.pick_next_task(rq, prev);
2732
2733		return p;
2734	}
2735
2736again:
2737	for_each_class(class) {
2738		p = class->pick_next_task(rq, prev);
2739		if (p) {
2740			if (unlikely(p == RETRY_TASK))
2741				goto again;
2742			return p;
2743		}
2744	}
2745
2746	BUG(); /* the idle class will always have a runnable task */
2747}
2748
2749/*
2750 * __schedule() is the main scheduler function.
2751 *
2752 * The main means of driving the scheduler and thus entering this function are:
2753 *
2754 *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
2755 *
2756 *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
2757 *      paths. For example, see arch/x86/entry_64.S.
2758 *
2759 *      To drive preemption between tasks, the scheduler sets the flag in timer
2760 *      interrupt handler scheduler_tick().
2761 *
2762 *   3. Wakeups don't really cause entry into schedule(). They add a
2763 *      task to the run-queue and that's it.
2764 *
2765 *      Now, if the new task added to the run-queue preempts the current
2766 *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
2767 *      called on the nearest possible occasion:
2768 *
2769 *       - If the kernel is preemptible (CONFIG_PREEMPT=y):
2770 *
2771 *         - in syscall or exception context, at the next outermost
2772 *           preempt_enable(). (this might be as soon as the wake_up()'s
2773 *           spin_unlock()!)
2774 *
2775 *         - in IRQ context, return from interrupt-handler to
2776 *           preemptible context
2777 *
2778 *       - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
2779 *         then at the next:
2780 *
2781 *          - cond_resched() call
2782 *          - explicit schedule() call
2783 *          - return from syscall or exception to user-space
2784 *          - return from interrupt-handler to user-space
2785 */
2786static void __sched __schedule(void)
2787{
2788	struct task_struct *prev, *next;
2789	unsigned long *switch_count;
2790	struct rq *rq;
2791	int cpu;
2792
2793need_resched:
2794	preempt_disable();
2795	cpu = smp_processor_id();
2796	rq = cpu_rq(cpu);
2797	rcu_note_context_switch(cpu);
2798	prev = rq->curr;
2799
2800	schedule_debug(prev);
2801
2802	if (sched_feat(HRTICK))
2803		hrtick_clear(rq);
2804
2805	/*
2806	 * Make sure that signal_pending_state()->signal_pending() below
2807	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
2808	 * done by the caller to avoid the race with signal_wake_up().
2809	 */
2810	smp_mb__before_spinlock();
2811	raw_spin_lock_irq(&rq->lock);
2812
2813	switch_count = &prev->nivcsw;
2814	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
2815		if (unlikely(signal_pending_state(prev->state, prev))) {
2816			prev->state = TASK_RUNNING;
2817		} else {
2818			deactivate_task(rq, prev, DEQUEUE_SLEEP);
2819			prev->on_rq = 0;
2820
2821			/*
2822			 * If a worker went to sleep, notify and ask workqueue
2823			 * whether it wants to wake up a task to maintain
2824			 * concurrency.
2825			 */
2826			if (prev->flags & PF_WQ_WORKER) {
2827				struct task_struct *to_wakeup;
2828
2829				to_wakeup = wq_worker_sleeping(prev, cpu);
2830				if (to_wakeup)
2831					try_to_wake_up_local(to_wakeup);
2832			}
2833		}
2834		switch_count = &prev->nvcsw;
2835	}
2836
2837	if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
2838		update_rq_clock(rq);
2839
2840	next = pick_next_task(rq, prev);
2841	clear_tsk_need_resched(prev);
2842	clear_preempt_need_resched();
2843	rq->skip_clock_update = 0;
2844
2845	if (likely(prev != next)) {
2846		rq->nr_switches++;
2847		rq->curr = next;
2848		++*switch_count;
2849
2850		context_switch(rq, prev, next); /* unlocks the rq */
2851		/*
2852		 * The context switch has flipped the stack from under us
2853		 * and restored the local variables which were saved when
2854		 * this task called schedule() in the past. prev == current
2855		 * is still correct, but the task may have moved to another cpu/rq.
2856		 */
2857		cpu = smp_processor_id();
2858		rq = cpu_rq(cpu);
2859	} else
2860		raw_spin_unlock_irq(&rq->lock);
2861
2862	post_schedule(rq);
2863
2864	sched_preempt_enable_no_resched();
2865	if (need_resched())
2866		goto need_resched;
2867}
2868
2869static inline void sched_submit_work(struct task_struct *tsk)
2870{
2871	if (!tsk->state || tsk_is_pi_blocked(tsk))
2872		return;
2873	/*
2874	 * If we are going to sleep and we have plugged IO queued,
2875	 * make sure to submit it to avoid deadlocks.
2876	 */
2877	if (blk_needs_flush_plug(tsk))
2878		blk_schedule_flush_plug(tsk);
2879}
2880
2881asmlinkage __visible void __sched schedule(void)
2882{
2883	struct task_struct *tsk = current;
2884
2885	sched_submit_work(tsk);
2886	__schedule();
2887}
2888EXPORT_SYMBOL(schedule);
2889
2890#ifdef CONFIG_CONTEXT_TRACKING
2891asmlinkage __visible void __sched schedule_user(void)
2892{
2893	/*
2894	 * If we come here after a random call to set_need_resched(),
2895	 * or we have been woken up remotely but the IPI has not yet arrived,
2896	 * we haven't yet exited the RCU idle mode. Do it here manually until
2897	 * we find a better solution.
2898	 */
2899	user_exit();
2900	schedule();
2901	user_enter();
2902}
2903#endif
2904
2905/**
2906 * schedule_preempt_disabled - called with preemption disabled
2907 *
2908 * Returns with preemption disabled. Note: preempt_count must be 1
2909 */
2910void __sched schedule_preempt_disabled(void)
2911{
2912	sched_preempt_enable_no_resched();
2913	schedule();
2914	preempt_disable();
2915}
2916
2917#ifdef CONFIG_PREEMPT
2918/*
2919 * This is the entry point to schedule() from in-kernel preemption via
2920 * preempt_enable(). Preemption off the return-from-interrupt path is
2921 * handled by preempt_schedule_irq() below, which calls schedule() directly.
2922 */
2923asmlinkage __visible void __sched notrace preempt_schedule(void)
2924{
2925	/*
2926	 * If there is a non-zero preempt_count or interrupts are disabled,
2927	 * we do not want to preempt the current task. Just return..
2928	 */
2929	if (likely(!preemptible()))
2930		return;
2931
2932	do {
2933		__preempt_count_add(PREEMPT_ACTIVE);
2934		__schedule();
2935		__preempt_count_sub(PREEMPT_ACTIVE);
2936
2937		/*
2938		 * Check again in case we missed a preemption opportunity
2939		 * between schedule and now.
2940		 */
2941		barrier();
2942	} while (need_resched());
2943}
2944NOKPROBE_SYMBOL(preempt_schedule);
2945EXPORT_SYMBOL(preempt_schedule);
2946#endif /* CONFIG_PREEMPT */
2947
2948/*
2949 * this is the entry point to schedule() from kernel preemption
2950 * off of irq context.
2951 * Note, that this is called and return with irqs disabled. This will
2952 * protect us against recursive calling from irq.
2953 */
2954asmlinkage __visible void __sched preempt_schedule_irq(void)
2955{
2956	enum ctx_state prev_state;
2957
2958	/* Catch callers which need to be fixed */
2959	BUG_ON(preempt_count() || !irqs_disabled());
2960
2961	prev_state = exception_enter();
2962
2963	do {
2964		__preempt_count_add(PREEMPT_ACTIVE);
2965		local_irq_enable();
2966		__schedule();
2967		local_irq_disable();
2968		__preempt_count_sub(PREEMPT_ACTIVE);
2969
2970		/*
2971		 * Check again in case we missed a preemption opportunity
2972		 * between schedule and now.
2973		 */
2974		barrier();
2975	} while (need_resched());
2976
2977	exception_exit(prev_state);
2978}
2979
2980int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
2981			  void *key)
2982{
2983	return try_to_wake_up(curr->private, mode, wake_flags);
2984}
2985EXPORT_SYMBOL(default_wake_function);
2986
2987#ifdef CONFIG_RT_MUTEXES
2988
2989/*
2990 * rt_mutex_setprio - set the current priority of a task
2991 * @p: task
2992 * @prio: prio value (kernel-internal form)
2993 *
2994 * This function changes the 'effective' priority of a task. It does
2995 * not touch ->normal_prio like __setscheduler().
2996 *
2997 * Used by the rt_mutex code to implement priority inheritance
2998 * logic. Call site only calls if the priority of the task changed.
2999 */
3000void rt_mutex_setprio(struct task_struct *p, int prio)
3001{
3002	int oldprio, queued, running, enqueue_flag = 0;
3003	struct rq *rq;
3004	const struct sched_class *prev_class;
3005
3006	BUG_ON(prio > MAX_PRIO);
3007
3008	rq = __task_rq_lock(p);
3009
3010	/*
3011	 * Idle task boosting is a no-no in general. There is one
3012	 * exception, when PREEMPT_RT and NOHZ is active:
3013	 *
3014	 * The idle task calls get_next_timer_interrupt() and holds
3015	 * the timer wheel base->lock on the CPU and another CPU wants
3016	 * to access the timer (probably to cancel it). We can safely
3017	 * ignore the boosting request, as the idle CPU runs this code
3018	 * with interrupts disabled and will complete the lock
3019	 * protected section without being interrupted. So there is no
3020	 * real need to boost.
3021	 */
3022	if (unlikely(p == rq->idle)) {
3023		WARN_ON(p != rq->curr);
3024		WARN_ON(p->pi_blocked_on);
3025		goto out_unlock;
3026	}
3027
3028	trace_sched_pi_setprio(p, prio);
3029	oldprio = p->prio;
3030	prev_class = p->sched_class;
3031	queued = task_on_rq_queued(p);
3032	running = task_current(rq, p);
3033	if (queued)
3034		dequeue_task(rq, p, 0);
3035	if (running)
3036		put_prev_task(rq, p);
3037
3038	/*
3039	 * Boosting conditions are:
3040	 * 1. -rt task is running and holds mutex A
3041	 *      --> -dl task blocks on mutex A
3042	 *
3043	 * 2. -dl task is running and holds mutex A
3044	 *      --> -dl task blocks on mutex A and could preempt the
3045	 *          running task
3046	 */
3047	if (dl_prio(prio)) {
3048		struct task_struct *pi_task = rt_mutex_get_top_task(p);
3049		if (!dl_prio(p->normal_prio) ||
3050		    (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
3051			p->dl.dl_boosted = 1;
3052			p->dl.dl_throttled = 0;
3053			enqueue_flag = ENQUEUE_REPLENISH;
3054		} else
3055			p->dl.dl_boosted = 0;
3056		p->sched_class = &dl_sched_class;
3057	} else if (rt_prio(prio)) {
3058		if (dl_prio(oldprio))
3059			p->dl.dl_boosted = 0;
3060		if (oldprio < prio)
3061			enqueue_flag = ENQUEUE_HEAD;
3062		p->sched_class = &rt_sched_class;
3063	} else {
3064		if (dl_prio(oldprio))
3065			p->dl.dl_boosted = 0;
3066		p->sched_class = &fair_sched_class;
3067	}
3068
3069	p->prio = prio;
3070
3071	if (running)
3072		p->sched_class->set_curr_task(rq);
3073	if (queued)
3074		enqueue_task(rq, p, enqueue_flag);
3075
3076	check_class_changed(rq, p, prev_class, oldprio);
3077out_unlock:
3078	__task_rq_unlock(rq);
3079}
3080#endif
3081
3082void set_user_nice(struct task_struct *p, long nice)
3083{
3084	int old_prio, delta, queued;
3085	unsigned long flags;
3086	struct rq *rq;
3087
3088	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
3089		return;
3090	/*
3091	 * We have to be careful, if called from sys_setpriority(),
3092	 * the task might be in the middle of scheduling on another CPU.
3093	 */
3094	rq = task_rq_lock(p, &flags);
3095	/*
3096	 * The RT priorities are set via sched_setscheduler(), but we still
3097	 * allow the 'normal' nice value to be set - but as expected
3098	 * it won't have any effect on scheduling as long as the task
3099	 * remains SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
3100	 */
3101	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
3102		p->static_prio = NICE_TO_PRIO(nice);
3103		goto out_unlock;
3104	}
3105	queued = task_on_rq_queued(p);
3106	if (queued)
3107		dequeue_task(rq, p, 0);
3108
3109	p->static_prio = NICE_TO_PRIO(nice);
3110	set_load_weight(p);
3111	old_prio = p->prio;
3112	p->prio = effective_prio(p);
3113	delta = p->prio - old_prio;
3114
3115	if (queued) {
3116		enqueue_task(rq, p, 0);
3117		/*
3118		 * If the task increased its priority or is running and
3119		 * lowered its priority, then reschedule its CPU:
3120		 */
3121		if (delta < 0 || (delta > 0 && task_running(rq, p)))
3122			resched_curr(rq);
3123	}
3124out_unlock:
3125	task_rq_unlock(rq, p, &flags);
3126}
3127EXPORT_SYMBOL(set_user_nice);
3128
3129/*
3130 * can_nice - check if a task can reduce its nice value
3131 * @p: task
3132 * @nice: nice value
3133 */
3134int can_nice(const struct task_struct *p, const int nice)
3135{
3136	/* convert nice value [19,-20] to rlimit style value [1,40] */
3137	int nice_rlim = nice_to_rlimit(nice);
3138
3139	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
3140		capable(CAP_SYS_NICE));
3141}
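
/*
 * Editor's note -- worked example (not in the original source):
 * nice_to_rlimit(n) == 20 - n, so nice 19 maps to 1 and nice -20 maps
 * to 40. With RLIMIT_NICE set to 25, an unprivileged task may therefore
 * lower its nice value to -5 at most, since 20 - (-5) == 25.
 */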
3142
3143#ifdef __ARCH_WANT_SYS_NICE
3144
3145/*
3146 * sys_nice - change the priority of the current process.
3147 * @increment: priority increment
3148 *
3149 * sys_setpriority is a more generic, but much slower function that
3150 * does similar things.
3151 */
3152SYSCALL_DEFINE1(nice, int, increment)
3153{
3154	long nice, retval;
3155
3156	/*
3157	 * Setpriority might change our priority at the same moment.
3158	 * We don't have to worry. Conceptually one call occurs first
3159	 * and we have a single winner.
3160	 */
3161	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
3162	nice = task_nice(current) + increment;
3163
3164	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
3165	if (increment < 0 && !can_nice(current, nice))
3166		return -EPERM;
3167
3168	retval = security_task_setnice(current, nice);
3169	if (retval)
3170		return retval;
3171
3172	set_user_nice(current, nice);
3173	return 0;
3174}
3175
3176#endif
3177
3178/**
3179 * task_prio - return the priority value of a given task.
3180 * @p: the task in question.
3181 *
3182 * Return: The priority value as seen by users in /proc.
3183 * RT and deadline tasks map to negative values; normal tasks
3184 * map to [0, 39], with nice 0 reported as 20.
3185 */
3186int task_prio(const struct task_struct *p)
3187{
3188	return p->prio - MAX_RT_PRIO;
3189}
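
/*
 * Editor's note -- examples of the mapping above: a nice-0 SCHED_NORMAL
 * task has p->prio == 120 and reports 20; nice -20 reports 0; a
 * SCHED_FIFO task with rt_priority 50 has p->prio == 49 and reports -51.
 */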
3190
3191/**
3192 * idle_cpu - is a given cpu idle currently?
3193 * @cpu: the processor in question.
3194 *
3195 * Return: 1 if the CPU is currently idle. 0 otherwise.
3196 */
3197int idle_cpu(int cpu)
3198{
3199	struct rq *rq = cpu_rq(cpu);
3200
3201	if (rq->curr != rq->idle)
3202		return 0;
3203
3204	if (rq->nr_running)
3205		return 0;
3206
3207#ifdef CONFIG_SMP
3208	if (!llist_empty(&rq->wake_list))
3209		return 0;
3210#endif
3211
3212	return 1;
3213}
3214
3215/**
3216 * idle_task - return the idle task for a given cpu.
3217 * @cpu: the processor in question.
3218 *
3219 * Return: The idle task for the cpu @cpu.
3220 */
3221struct task_struct *idle_task(int cpu)
3222{
3223	return cpu_rq(cpu)->idle;
3224}
3225
3226/**
3227 * find_process_by_pid - find a process with a matching PID value.
3228 * @pid: the pid in question.
3229 *
3230 * The task of @pid, if found. %NULL otherwise.
3231 */
3232static struct task_struct *find_process_by_pid(pid_t pid)
3233{
3234	return pid ? find_task_by_vpid(pid) : current;
3235}
3236
3237/*
3238 * This function initializes the sched_dl_entity of a newly becoming
3239 * SCHED_DEADLINE task.
3240 *
3241 * Only the static values are considered here, the actual runtime and the
3242 * absolute deadline will be properly calculated when the task is enqueued
3243 * for the first time with its new policy.
3244 */
3245static void
3246__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3247{
3248	struct sched_dl_entity *dl_se = &p->dl;
3249
3250	init_dl_task_timer(dl_se);
3251	dl_se->dl_runtime = attr->sched_runtime;
3252	dl_se->dl_deadline = attr->sched_deadline;
3253	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
3254	dl_se->flags = attr->sched_flags;
3255	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3256	dl_se->dl_throttled = 0;
3257	dl_se->dl_new = 1;
3258	dl_se->dl_yielded = 0;
3259}
3260
3261/*
3262 * sched_setparam() passes in -1 for its policy, to let the functions
3263 * it calls know not to change it.
3264 */
3265#define SETPARAM_POLICY	-1
3266
3267static void __setscheduler_params(struct task_struct *p,
3268		const struct sched_attr *attr)
3269{
3270	int policy = attr->sched_policy;
3271
3272	if (policy == SETPARAM_POLICY)
3273		policy = p->policy;
3274
3275	p->policy = policy;
3276
3277	if (dl_policy(policy))
3278		__setparam_dl(p, attr);
3279	else if (fair_policy(policy))
3280		p->static_prio = NICE_TO_PRIO(attr->sched_nice);
3281
3282	/*
3283	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
3284	 * !rt_policy. Always setting this ensures that things like
3285	 * getparam()/getattr() don't report silly values for !rt tasks.
3286	 */
3287	p->rt_priority = attr->sched_priority;
3288	p->normal_prio = normal_prio(p);
3289	set_load_weight(p);
3290}
3291
3292/* Actually do priority change: must hold pi & rq lock. */
3293static void __setscheduler(struct rq *rq, struct task_struct *p,
3294			   const struct sched_attr *attr)
3295{
3296	__setscheduler_params(p, attr);
3297
3298	/*
3299	 * If we get here, there was no pi waiters boosting the
3300	 * task. It is safe to use the normal prio.
3301	 */
3302	p->prio = normal_prio(p);
3303
3304	if (dl_prio(p->prio))
3305		p->sched_class = &dl_sched_class;
3306	else if (rt_prio(p->prio))
3307		p->sched_class = &rt_sched_class;
3308	else
3309		p->sched_class = &fair_sched_class;
3310}
3311
3312static void
3313__getparam_dl(struct task_struct *p, struct sched_attr *attr)
3314{
3315	struct sched_dl_entity *dl_se = &p->dl;
3316
3317	attr->sched_priority = p->rt_priority;
3318	attr->sched_runtime = dl_se->dl_runtime;
3319	attr->sched_deadline = dl_se->dl_deadline;
3320	attr->sched_period = dl_se->dl_period;
3321	attr->sched_flags = dl_se->flags;
3322}
3323
3324/*
3325 * This function validates the new parameters of a -deadline task.
3326 * We ask for the deadline not being zero, and greater or equal
3327 * than the runtime, as well as the period of being zero or
3328 * greater than deadline. Furthermore, we have to be sure that
3329 * user parameters are above the internal resolution of 1us (we
3330 * check sched_runtime only since it is always the smaller one) and
3331 * below 2^63 ns (we have to check both sched_deadline and
3332 * sched_period, as the latter can be zero).
3333 */
3334static bool
3335__checkparam_dl(const struct sched_attr *attr)
3336{
3337	/* deadline != 0 */
3338	if (attr->sched_deadline == 0)
3339		return false;
3340
3341	/*
3342	 * Since we truncate DL_SCALE bits, make sure we're at least
3343	 * that big.
3344	 */
3345	if (attr->sched_runtime < (1ULL << DL_SCALE))
3346		return false;
3347
3348	/*
3349	 * Since we use the MSB for wrap-around and sign issues, make
3350	 * sure it's not set (mind that period can be equal to zero).
3351	 */
3352	if (attr->sched_deadline & (1ULL << 63) ||
3353	    attr->sched_period & (1ULL << 63))
3354		return false;
3355
3356	/* runtime <= deadline <= period (if period != 0) */
3357	if ((attr->sched_period != 0 &&
3358	     attr->sched_period < attr->sched_deadline) ||
3359	    attr->sched_deadline < attr->sched_runtime)
3360		return false;
3361
3362	return true;
3363}
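
/*
 * Editor's sketch: runtime 10ms / deadline 20ms / period 100ms passes all
 * of the checks above (runtime <= deadline <= period, runtime >= ~1us),
 * while runtime 500ns would be rejected by the DL_SCALE resolution check,
 * and period 10ms with deadline 20ms by the ordering check.
 */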
3364
3365/*
3366 * check the target process has a UID that matches the current process's
3367 */
3368static bool check_same_owner(struct task_struct *p)
3369{
3370	const struct cred *cred = current_cred(), *pcred;
3371	bool match;
3372
3373	rcu_read_lock();
3374	pcred = __task_cred(p);
3375	match = (uid_eq(cred->euid, pcred->euid) ||
3376		 uid_eq(cred->euid, pcred->uid));
3377	rcu_read_unlock();
3378	return match;
3379}
3380
3381static int __sched_setscheduler(struct task_struct *p,
3382				const struct sched_attr *attr,
3383				bool user)
3384{
3385	int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
3386		      MAX_RT_PRIO - 1 - attr->sched_priority;
3387	int retval, oldprio, oldpolicy = -1, queued, running;
3388	int policy = attr->sched_policy;
3389	unsigned long flags;
3390	const struct sched_class *prev_class;
3391	struct rq *rq;
3392	int reset_on_fork;
3393
3394	/* may grab non-irq protected spin_locks */
3395	BUG_ON(in_interrupt());
3396recheck:
3397	/* double check policy once rq lock held */
3398	if (policy < 0) {
3399		reset_on_fork = p->sched_reset_on_fork;
3400		policy = oldpolicy = p->policy;
3401	} else {
3402		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
3403
3404		if (policy != SCHED_DEADLINE &&
3405				policy != SCHED_FIFO && policy != SCHED_RR &&
3406				policy != SCHED_NORMAL && policy != SCHED_BATCH &&
3407				policy != SCHED_IDLE)
3408			return -EINVAL;
3409	}
3410
3411	if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK))
3412		return -EINVAL;
3413
3414	/*
3415	 * Valid priorities for SCHED_FIFO and SCHED_RR are
3416	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
3417	 * SCHED_BATCH and SCHED_IDLE is 0.
3418	 */
3419	if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
3420	    (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
3421		return -EINVAL;
3422	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
3423	    (rt_policy(policy) != (attr->sched_priority != 0)))
3424		return -EINVAL;
3425
3426	/*
3427	 * Allow unprivileged RT tasks to decrease priority:
3428	 */
3429	if (user && !capable(CAP_SYS_NICE)) {
3430		if (fair_policy(policy)) {
3431			if (attr->sched_nice < task_nice(p) &&
3432			    !can_nice(p, attr->sched_nice))
3433				return -EPERM;
3434		}
3435
3436		if (rt_policy(policy)) {
3437			unsigned long rlim_rtprio =
3438					task_rlimit(p, RLIMIT_RTPRIO);
3439
3440			/* can't set/change the rt policy */
3441			if (policy != p->policy && !rlim_rtprio)
3442				return -EPERM;
3443
3444			/* can't increase priority */
3445			if (attr->sched_priority > p->rt_priority &&
3446			    attr->sched_priority > rlim_rtprio)
3447				return -EPERM;
3448		}
3449
3450		/*
3451		 * Can't set/change SCHED_DEADLINE policy at all for now
3452		 * (safest behavior); in the future we would like to allow
3453		 * unprivileged DL tasks to increase their relative deadline
3454		 * or reduce their runtime (both ways reducing utilization).
3455		 */
3456		if (dl_policy(policy))
3457			return -EPERM;
3458
3459		/*
3460		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
3461		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
3462		 */
3463		if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
3464			if (!can_nice(p, task_nice(p)))
3465				return -EPERM;
3466		}
3467
3468		/* can't change other user's priorities */
3469		if (!check_same_owner(p))
3470			return -EPERM;
3471
3472		/* Normal users shall not reset the sched_reset_on_fork flag */
3473		if (p->sched_reset_on_fork && !reset_on_fork)
3474			return -EPERM;
3475	}
3476
3477	if (user) {
3478		retval = security_task_setscheduler(p);
3479		if (retval)
3480			return retval;
3481	}
3482
3483	/*
3484	 * make sure no PI-waiters arrive (or leave) while we are
3485	 * changing the priority of the task:
3486	 *
3487	 * To be able to change p->policy safely, the appropriate
3488	 * runqueue lock must be held.
3489	 */
3490	rq = task_rq_lock(p, &flags);
3491
3492	/*
3493	 * Changing the policy of the stop thread is a very bad idea.
3494	 */
3495	if (p == rq->stop) {
3496		task_rq_unlock(rq, p, &flags);
3497		return -EINVAL;
3498	}
3499
3500	/*
3501	 * If not changing anything there's no need to proceed further,
3502	 * but store a possible modification of reset_on_fork.
3503	 */
3504	if (unlikely(policy == p->policy)) {
3505		if (fair_policy(policy) && attr->sched_nice != task_nice(p))
3506			goto change;
3507		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
3508			goto change;
3509		if (dl_policy(policy))
3510			goto change;
3511
3512		p->sched_reset_on_fork = reset_on_fork;
3513		task_rq_unlock(rq, p, &flags);
3514		return 0;
3515	}
3516change:
3517
3518	if (user) {
3519#ifdef CONFIG_RT_GROUP_SCHED
3520		/*
3521		 * Do not allow realtime tasks into groups that have no runtime
3522		 * assigned.
3523		 */
3524		if (rt_bandwidth_enabled() && rt_policy(policy) &&
3525				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
3526				!task_group_is_autogroup(task_group(p))) {
3527			task_rq_unlock(rq, p, &flags);
3528			return -EPERM;
3529		}
3530#endif
3531#ifdef CONFIG_SMP
3532		if (dl_bandwidth_enabled() && dl_policy(policy)) {
3533			cpumask_t *span = rq->rd->span;
3534
3535			/*
3536			 * Don't allow tasks with an affinity mask smaller than
3537			 * the entire root_domain to become SCHED_DEADLINE. We
3538			 * will also fail if there's no bandwidth available.
3539			 */
3540			if (!cpumask_subset(span, &p->cpus_allowed) ||
3541			    rq->rd->dl_bw.bw == 0) {
3542				task_rq_unlock(rq, p, &flags);
3543				return -EPERM;
3544			}
3545		}
3546#endif
3547	}
3548
3549	/* recheck policy now with rq lock held */
3550	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
3551		policy = oldpolicy = -1;
3552		task_rq_unlock(rq, p, &flags);
3553		goto recheck;
3554	}
3555
3556	/*
3557	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
3558	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
3559	 * is available.
3560	 */
3561	if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
3562		task_rq_unlock(rq, p, &flags);
3563		return -EBUSY;
3564	}
3565
3566	p->sched_reset_on_fork = reset_on_fork;
3567	oldprio = p->prio;
3568
3569	/*
3570	 * Special case for priority boosted tasks.
3571	 *
3572	 * If the new priority is lower than or equal to (from the user
3573	 * space view) the current (boosted) priority, we just store the new
3574	 * normal parameters and do not touch the scheduler class and
3575	 * the runqueue. This will be done when the task deboosts
3576	 * itself.
3577	 */
3578	if (rt_mutex_check_prio(p, newprio)) {
3579		__setscheduler_params(p, attr);
3580		task_rq_unlock(rq, p, &flags);
3581		return 0;
3582	}
3583
3584	queued = task_on_rq_queued(p);
3585	running = task_current(rq, p);
3586	if (queued)
3587		dequeue_task(rq, p, 0);
3588	if (running)
3589		put_prev_task(rq, p);
3590
3591	prev_class = p->sched_class;
3592	__setscheduler(rq, p, attr);
3593
3594	if (running)
3595		p->sched_class->set_curr_task(rq);
3596	if (queued) {
3597		/*
3598		 * We enqueue to tail when the priority of a task is
3599		 * increased (user space view).
3600		 */
3601		enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
3602	}
3603
3604	check_class_changed(rq, p, prev_class, oldprio);
3605	task_rq_unlock(rq, p, &flags);
3606
3607	rt_mutex_adjust_pi(p);
3608
3609	return 0;
3610}
3611
3612static int _sched_setscheduler(struct task_struct *p, int policy,
3613			       const struct sched_param *param, bool check)
3614{
3615	struct sched_attr attr = {
3616		.sched_policy   = policy,
3617		.sched_priority = param->sched_priority,
3618		.sched_nice	= PRIO_TO_NICE(p->static_prio),
3619	};
3620
3621	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
3622	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
3623		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
3624		policy &= ~SCHED_RESET_ON_FORK;
3625		attr.sched_policy = policy;
3626	}
3627
3628	return __sched_setscheduler(p, &attr, check);
3629}
3630/**
3631 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
3632 * @p: the task in question.
3633 * @policy: new policy.
3634 * @param: structure containing the new RT priority.
3635 *
3636 * Return: 0 on success. An error code otherwise.
3637 *
3638 * NOTE that the task may be already dead.
3639 */
3640int sched_setscheduler(struct task_struct *p, int policy,
3641		       const struct sched_param *param)
3642{
3643	return _sched_setscheduler(p, policy, param, true);
3644}
3645EXPORT_SYMBOL_GPL(sched_setscheduler);
3646
3647int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
3648{
3649	return __sched_setscheduler(p, attr, true);
3650}
3651EXPORT_SYMBOL_GPL(sched_setattr);
3652
3653/**
3654 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
3655 * @p: the task in question.
3656 * @policy: new policy.
3657 * @param: structure containing the new RT priority.
3658 *
3659 * Just like sched_setscheduler, only don't bother checking if the
3660 * current context has permission.  For example, this is needed in
3661 * stop_machine(): we create temporary high priority worker threads,
3662 * but our caller might not have that capability.
3663 *
3664 * Return: 0 on success. An error code otherwise.
3665 */
3666int sched_setscheduler_nocheck(struct task_struct *p, int policy,
3667			       const struct sched_param *param)
3668{
3669	return _sched_setscheduler(p, policy, param, false);
3670}
3671
3672static int
3673do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
3674{
3675	struct sched_param lparam;
3676	struct task_struct *p;
3677	int retval;
3678
3679	if (!param || pid < 0)
3680		return -EINVAL;
3681	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
3682		return -EFAULT;
3683
3684	rcu_read_lock();
3685	retval = -ESRCH;
3686	p = find_process_by_pid(pid);
3687	if (p != NULL)
3688		retval = sched_setscheduler(p, policy, &lparam);
3689	rcu_read_unlock();
3690
3691	return retval;
3692}
3693
3694/*
3695 * Mimics kernel/events/core.c perf_copy_attr().
3696 */
3697static int sched_copy_attr(struct sched_attr __user *uattr,
3698			   struct sched_attr *attr)
3699{
3700	u32 size;
3701	int ret;
3702
3703	if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
3704		return -EFAULT;
3705
3706	/*
3707	 * Zero the full structure, so that a short copy leaves the trailing fields zeroed.
3708	 */
3709	memset(attr, 0, sizeof(*attr));
3710
3711	ret = get_user(size, &uattr->size);
3712	if (ret)
3713		return ret;
3714
3715	if (size > PAGE_SIZE)	/* silly large */
3716		goto err_size;
3717
3718	if (!size)		/* abi compat */
3719		size = SCHED_ATTR_SIZE_VER0;
3720
3721	if (size < SCHED_ATTR_SIZE_VER0)
3722		goto err_size;
3723
3724	/*
3725	 * If we're handed a bigger struct than we know of,
3726	 * ensure all the unknown bits are 0 - i.e. new
3727	 * user-space does not rely on any kernel feature
3728	 * extensions we don't know about yet.
3729	 */
3730	if (size > sizeof(*attr)) {
3731		unsigned char __user *addr;
3732		unsigned char __user *end;
3733		unsigned char val;
3734
3735		addr = (void __user *)uattr + sizeof(*attr);
3736		end  = (void __user *)uattr + size;
3737
3738		for (; addr < end; addr++) {
3739			ret = get_user(val, addr);
3740			if (ret)
3741				return ret;
3742			if (val)
3743				goto err_size;
3744		}
3745		size = sizeof(*attr);
3746	}
3747
3748	ret = copy_from_user(attr, uattr, size);
3749	if (ret)
3750		return -EFAULT;
3751
3752	/*
3753	 * XXX: do we want to be lenient like existing syscalls; or do we want
3754	 * to be strict and return an error on out-of-bounds values?
3755	 */
3756	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
3757
3758	return 0;
3759
3760err_size:
3761	put_user(sizeof(*attr), &uattr->size);
3762	return -E2BIG;
3763}
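
/*
 * Editor's note -- example of the compat scheme above: user-space built
 * against a newer ABI may pass size == 72 to a kernel whose struct
 * sched_attr is 48 bytes (SCHED_ATTR_SIZE_VER0); the copy is accepted
 * only if bytes 48..71 are all zero. Otherwise -E2BIG is returned with
 * uattr->size rewritten to 48 so the caller can shrink and retry.
 */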
3764
3765/**
3766 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
3767 * @pid: the pid in question.
3768 * @policy: new policy.
3769 * @param: structure containing the new RT priority.
3770 *
3771 * Return: 0 on success. An error code otherwise.
3772 */
3773SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
3774		struct sched_param __user *, param)
3775{
3776	/* negative values for policy are not valid */
3777	if (policy < 0)
3778		return -EINVAL;
3779
3780	return do_sched_setscheduler(pid, policy, param);
3781}
3782
3783/**
3784 * sys_sched_setparam - set/change the RT priority of a thread
3785 * @pid: the pid in question.
3786 * @param: structure containing the new RT priority.
3787 *
3788 * Return: 0 on success. An error code otherwise.
3789 */
3790SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
3791{
3792	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
3793}
3794
3795/**
3796 * sys_sched_setattr - same as above, but with extended sched_attr
3797 * @pid: the pid in question.
3798 * @uattr: structure containing the extended parameters.
3799 * @flags: for future extension.
3800 */
3801SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
3802			       unsigned int, flags)
3803{
3804	struct sched_attr attr;
3805	struct task_struct *p;
3806	int retval;
3807
3808	if (!uattr || pid < 0 || flags)
3809		return -EINVAL;
3810
3811	retval = sched_copy_attr(uattr, &attr);
3812	if (retval)
3813		return retval;
3814
3815	if ((int)attr.sched_policy < 0)
3816		return -EINVAL;
3817
3818	rcu_read_lock();
3819	retval = -ESRCH;
3820	p = find_process_by_pid(pid);
3821	if (p != NULL)
3822		retval = sched_setattr(p, &attr);
3823	rcu_read_unlock();
3824
3825	return retval;
3826}
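
/*
 * Editor's sketch of user-space usage (illustrative, not part of this
 * file): glibc provides no wrapper for sched_setattr(), so callers go
 * through syscall(2) and declare a struct with the same layout that
 * sched_copy_attr() expects.
 */
#if 0	/* user-space illustration only */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>	/* SYS_sched_setattr needs recent headers */

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;		/* SCHED_NORMAL, SCHED_BATCH */
	uint32_t sched_priority;	/* SCHED_FIFO, SCHED_RR */
	uint64_t sched_runtime;		/* SCHED_DEADLINE, all in ns */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

static int make_self_deadline(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy = 6;			/* SCHED_DEADLINE */
	attr.sched_runtime = 10 * 1000 * 1000;	/* 10ms */
	attr.sched_deadline = 30 * 1000 * 1000;	/* 30ms */
	attr.sched_period = 100 * 1000 * 1000;	/* 100ms */

	/* pid 0 means the calling thread; flags must be 0 */
	return syscall(SYS_sched_setattr, 0, &attr, 0);
}
#endif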
3827
3828/**
3829 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
3830 * @pid: the pid in question.
3831 *
3832 * Return: On success, the policy of the thread. Otherwise, a negative error
3833 * code.
3834 */
3835SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
3836{
3837	struct task_struct *p;
3838	int retval;
3839
3840	if (pid < 0)
3841		return -EINVAL;
3842
3843	retval = -ESRCH;
3844	rcu_read_lock();
3845	p = find_process_by_pid(pid);
3846	if (p) {
3847		retval = security_task_getscheduler(p);
3848		if (!retval)
3849			retval = p->policy
3850				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
3851	}
3852	rcu_read_unlock();
3853	return retval;
3854}
3855
3856/**
3857 * sys_sched_getparam - get the RT priority of a thread
3858 * @pid: the pid in question.
3859 * @param: structure containing the RT priority.
3860 *
3861 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
3862 * code.
3863 */
3864SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
3865{
3866	struct sched_param lp = { .sched_priority = 0 };
3867	struct task_struct *p;
3868	int retval;
3869
3870	if (!param || pid < 0)
3871		return -EINVAL;
3872
3873	rcu_read_lock();
3874	p = find_process_by_pid(pid);
3875	retval = -ESRCH;
3876	if (!p)
3877		goto out_unlock;
3878
3879	retval = security_task_getscheduler(p);
3880	if (retval)
3881		goto out_unlock;
3882
3883	if (task_has_rt_policy(p))
3884		lp.sched_priority = p->rt_priority;
3885	rcu_read_unlock();
3886
3887	/*
3888	 * This one might sleep; we cannot do it with a spinlock held.
3889	 */
3890	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
3891
3892	return retval;
3893
3894out_unlock:
3895	rcu_read_unlock();
3896	return retval;
3897}
3898
3899static int sched_read_attr(struct sched_attr __user *uattr,
3900			   struct sched_attr *attr,
3901			   unsigned int usize)
3902{
3903	int ret;
3904
3905	if (!access_ok(VERIFY_WRITE, uattr, usize))
3906		return -EFAULT;
3907
3908	/*
3909	 * If we're handed a smaller struct than we know of,
3910	 * ensure all the unknown bits are 0 - i.e. old
3911	 * user-space does not get incomplete information.
3912	 */
3913	if (usize < sizeof(*attr)) {
3914		unsigned char *addr;
3915		unsigned char *end;
3916
3917		addr = (void *)attr + usize;
3918		end  = (void *)attr + sizeof(*attr);
3919
3920		for (; addr < end; addr++) {
3921			if (*addr)
3922				return -EFBIG;
3923		}
3924
3925		attr->size = usize;
3926	}
3927
3928	ret = copy_to_user(uattr, attr, attr->size);
3929	if (ret)
3930		return -EFAULT;
3931
3932	return 0;
3933}
3934
3935/**
3936 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
3937 * @pid: the pid in question.
3938 * @uattr: structure containing the extended parameters.
3939 * @size: sizeof(attr) for fwd/bwd comp.
3940 * @flags: for future extension.
3941 */
3942SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
3943		unsigned int, size, unsigned int, flags)
3944{
3945	struct sched_attr attr = {
3946		.size = sizeof(struct sched_attr),
3947	};
3948	struct task_struct *p;
3949	int retval;
3950
3951	if (!uattr || pid < 0 || size > PAGE_SIZE ||
3952	    size < SCHED_ATTR_SIZE_VER0 || flags)
3953		return -EINVAL;
3954
3955	rcu_read_lock();
3956	p = find_process_by_pid(pid);
3957	retval = -ESRCH;
3958	if (!p)
3959		goto out_unlock;
3960
3961	retval = security_task_getscheduler(p);
3962	if (retval)
3963		goto out_unlock;
3964
3965	attr.sched_policy = p->policy;
3966	if (p->sched_reset_on_fork)
3967		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
3968	if (task_has_dl_policy(p))
3969		__getparam_dl(p, &attr);
3970	else if (task_has_rt_policy(p))
3971		attr.sched_priority = p->rt_priority;
3972	else
3973		attr.sched_nice = task_nice(p);
3974
3975	rcu_read_unlock();
3976
3977	retval = sched_read_attr(uattr, &attr, size);
3978	return retval;
3979
3980out_unlock:
3981	rcu_read_unlock();
3982	return retval;
3983}
3984
3985long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
3986{
3987	cpumask_var_t cpus_allowed, new_mask;
3988	struct task_struct *p;
3989	int retval;
3990
3991	rcu_read_lock();
3992
3993	p = find_process_by_pid(pid);
3994	if (!p) {
3995		rcu_read_unlock();
3996		return -ESRCH;
3997	}
3998
3999	/* Prevent p going away */
4000	get_task_struct(p);
4001	rcu_read_unlock();
4002
4003	if (p->flags & PF_NO_SETAFFINITY) {
4004		retval = -EINVAL;
4005		goto out_put_task;
4006	}
4007	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4008		retval = -ENOMEM;
4009		goto out_put_task;
4010	}
4011	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4012		retval = -ENOMEM;
4013		goto out_free_cpus_allowed;
4014	}
4015	retval = -EPERM;
4016	if (!check_same_owner(p)) {
4017		rcu_read_lock();
4018		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
4019			rcu_read_unlock();
4020			goto out_unlock;
4021		}
4022		rcu_read_unlock();
4023	}
4024
4025	retval = security_task_setscheduler(p);
4026	if (retval)
4027		goto out_unlock;
4028
4029
4030	cpuset_cpus_allowed(p, cpus_allowed);
4031	cpumask_and(new_mask, in_mask, cpus_allowed);
4032
4033	/*
4034	 * Since bandwidth control happens on a root_domain basis,
4035	 * if the admission test is enabled, we only admit -deadline
4036	 * tasks allowed to run on all the CPUs in the task's
4037	 * root_domain.
4038	 */
4039#ifdef CONFIG_SMP
4040	if (task_has_dl_policy(p)) {
4041		const struct cpumask *span = task_rq(p)->rd->span;
4042
4043		if (dl_bandwidth_enabled() && !cpumask_subset(span, new_mask)) {
4044			retval = -EBUSY;
4045			goto out_unlock;
4046		}
4047	}
4048#endif
4049again:
4050	retval = set_cpus_allowed_ptr(p, new_mask);
4051
4052	if (!retval) {
4053		cpuset_cpus_allowed(p, cpus_allowed);
4054		if (!cpumask_subset(new_mask, cpus_allowed)) {
4055			/*
4056			 * We must have raced with a concurrent cpuset
4057			 * update. Just reset the cpus_allowed to the
4058			 * cpuset's cpus_allowed.
4059			 */
4060			cpumask_copy(new_mask, cpus_allowed);
4061			goto again;
4062		}
4063	}
4064out_unlock:
4065	free_cpumask_var(new_mask);
4066out_free_cpus_allowed:
4067	free_cpumask_var(cpus_allowed);
4068out_put_task:
4069	put_task_struct(p);
4070	return retval;
4071}
4072
4073static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
4074			     struct cpumask *new_mask)
4075{
4076	if (len < cpumask_size())
4077		cpumask_clear(new_mask);
4078	else if (len > cpumask_size())
4079		len = cpumask_size();
4080
4081	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4082}
4083
4084/**
4085 * sys_sched_setaffinity - set the cpu affinity of a process
4086 * @pid: pid of the process
4087 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4088 * @user_mask_ptr: user-space pointer to the new cpu mask
4089 *
4090 * Return: 0 on success. An error code otherwise.
4091 */
4092SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4093		unsigned long __user *, user_mask_ptr)
4094{
4095	cpumask_var_t new_mask;
4096	int retval;
4097
4098	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4099		return -ENOMEM;
4100
4101	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4102	if (retval == 0)
4103		retval = sched_setaffinity(pid, new_mask);
4104	free_cpumask_var(new_mask);
4105	return retval;
4106}
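
/*
 * Illustrative userspace sketch (not kernel code), pinning the calling
 * thread to CPU 0 via the glibc wrapper for this syscall:
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	if (sched_setaffinity(0, sizeof(set), &set) == -1)
 *		perror("sched_setaffinity");
 */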
4107
4108long sched_getaffinity(pid_t pid, struct cpumask *mask)
4109{
4110	struct task_struct *p;
4111	unsigned long flags;
4112	int retval;
4113
4114	rcu_read_lock();
4115
4116	retval = -ESRCH;
4117	p = find_process_by_pid(pid);
4118	if (!p)
4119		goto out_unlock;
4120
4121	retval = security_task_getscheduler(p);
4122	if (retval)
4123		goto out_unlock;
4124
4125	raw_spin_lock_irqsave(&p->pi_lock, flags);
4126	cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
4127	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4128
4129out_unlock:
4130	rcu_read_unlock();
4131
4132	return retval;
4133}
4134
4135/**
4136 * sys_sched_getaffinity - get the cpu affinity of a process
4137 * @pid: pid of the process
4138 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4139 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4140 *
4141 * Return: 0 on success. An error code otherwise.
4142 */
4143SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4144		unsigned long __user *, user_mask_ptr)
4145{
4146	int ret;
4147	cpumask_var_t mask;
4148
4149	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4150		return -EINVAL;
4151	if (len & (sizeof(unsigned long)-1))
4152		return -EINVAL;
4153
4154	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4155		return -ENOMEM;
4156
4157	ret = sched_getaffinity(pid, mask);
4158	if (ret == 0) {
4159		size_t retlen = min_t(size_t, len, cpumask_size());
4160
4161		if (copy_to_user(user_mask_ptr, mask, retlen))
4162			ret = -EFAULT;
4163		else
4164			ret = retlen;
4165	}
4166	free_cpumask_var(mask);
4167
4168	return ret;
4169}
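
/*
 * Note that on success the raw syscall returns the number of bytes
 * copied into user_mask_ptr (retlen above); the glibc wrapper hides
 * this and returns 0. Illustrative userspace sketch (not kernel code):
 *
 *	cpu_set_t set;
 *
 *	if (sched_getaffinity(0, sizeof(set), &set) == 0)
 *		printf("CPU0 allowed: %d\n", CPU_ISSET(0, &set));
 */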
4170
4171/**
4172 * sys_sched_yield - yield the current processor to other threads.
4173 *
4174 * This function yields the current CPU to other tasks. If there are no
4175 * other threads running on this CPU then this function will return.
4176 *
4177 * Return: 0.
4178 */
4179SYSCALL_DEFINE0(sched_yield)
4180{
4181	struct rq *rq = this_rq_lock();
4182
4183	schedstat_inc(rq, yld_count);
4184	current->sched_class->yield_task(rq);
4185
4186	/*
4187	 * Since we are going to call schedule() anyway, there's
4188	 * no need to preempt or enable interrupts:
4189	 */
4190	__release(rq->lock);
4191	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
4192	do_raw_spin_unlock(&rq->lock);
4193	sched_preempt_enable_no_resched();
4194
4195	schedule();
4196
4197	return 0;
4198}
4199
4200static void __cond_resched(void)
4201{
4202	__preempt_count_add(PREEMPT_ACTIVE);
4203	__schedule();
4204	__preempt_count_sub(PREEMPT_ACTIVE);
4205}
4206
4207int __sched _cond_resched(void)
4208{
4209	if (should_resched()) {
4210		__cond_resched();
4211		return 1;
4212	}
4213	return 0;
4214}
4215EXPORT_SYMBOL(_cond_resched);
4216
4217/*
4218 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
4219 * call schedule, and on return reacquire the lock.
4220 *
4221 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
4222 * operations here to prevent schedule() from being called twice (once via
4223 * spin_unlock(), once by hand).
4224 */
4225int __cond_resched_lock(spinlock_t *lock)
4226{
4227	int resched = should_resched();
4228	int ret = 0;
4229
4230	lockdep_assert_held(lock);
4231
4232	if (spin_needbreak(lock) || resched) {
4233		spin_unlock(lock);
4234		if (resched)
4235			__cond_resched();
4236		else
4237			cpu_relax();
4238		ret = 1;
4239		spin_lock(lock);
4240	}
4241	return ret;
4242}
4243EXPORT_SYMBOL(__cond_resched_lock);
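
/*
 * Illustrative in-kernel sketch (foo_lock, nr_entries and foo_scan() are
 * hypothetical): a long scan under a spinlock can use the
 * cond_resched_lock() wrapper to bound lock hold time and scheduling
 * latency. A non-zero return means the lock was dropped and reacquired,
 * so any state derived under the lock must be revalidated:
 *
 *	spin_lock(&foo_lock);
 *	for (i = 0; i < nr_entries; i++) {
 *		foo_scan(i);
 *		cond_resched_lock(&foo_lock);
 *	}
 *	spin_unlock(&foo_lock);
 */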
4244
4245int __sched __cond_resched_softirq(void)
4246{
4247	BUG_ON(!in_softirq());
4248
4249	if (should_resched()) {
4250		local_bh_enable();
4251		__cond_resched();
4252		local_bh_disable();
4253		return 1;
4254	}
4255	return 0;
4256}
4257EXPORT_SYMBOL(__cond_resched_softirq);
4258
4259/**
4260 * yield - yield the current processor to other threads.
4261 *
4262 * Do not ever use this function, there's a 99% chance you're doing it wrong.
4263 *
4264 * The scheduler is at all times free to pick the calling task as the most
4265 * eligible task to run; if removing the yield() call from your code breaks
4266 * it, it's already broken.
4267 *
4268 * Typical broken usage is:
4269 *
4270 * while (!event)
4271 * 	yield();
4272 *
4273 * where one assumes that yield() will let 'the other' process run and that
4274 * this will make event true. If the current task is a SCHED_FIFO task, that
4275 * will never happen. Never use yield() as a progress guarantee!!
4276 *
4277 * If you want to use yield() to wait for something, use wait_event().
4278 * If you want to use yield() to be 'nice' for others, use cond_resched().
4279 * If you still want to use yield(), do not!
4280 */
4281void __sched yield(void)
4282{
4283	set_current_state(TASK_RUNNING);
4284	sys_sched_yield();
4285}
4286EXPORT_SYMBOL(yield);
4287
4288/**
4289 * yield_to - yield the current processor to another thread in
4290 * your thread group, or accelerate that thread toward the
4291 * processor it's on.
4292 * @p: target task
4293 * @preempt: whether task preemption is allowed or not
4294 *
4295 * It's the caller's job to ensure that the target task struct
4296 * can't go away on us before we can do any checks.
4297 *
4298 * Return:
4299 *	true (>0) if we indeed boosted the target task.
4300 *	false (0) if we failed to boost the target.
4301 *	-ESRCH if there's no task to yield to.
4302 */
4303int __sched yield_to(struct task_struct *p, bool preempt)
4304{
4305	struct task_struct *curr = current;
4306	struct rq *rq, *p_rq;
4307	unsigned long flags;
4308	int yielded = 0;
4309
4310	local_irq_save(flags);
4311	rq = this_rq();
4312
4313again:
4314	p_rq = task_rq(p);
4315	/*
4316	 * If we're the only runnable task on the rq and target rq also
4317	 * has only one task, there's absolutely no point in yielding.
4318	 */
4319	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
4320		yielded = -ESRCH;
4321		goto out_irq;
4322	}
4323
4324	double_rq_lock(rq, p_rq);
4325	if (task_rq(p) != p_rq) {
4326		double_rq_unlock(rq, p_rq);
4327		goto again;
4328	}
4329
4330	if (!curr->sched_class->yield_to_task)
4331		goto out_unlock;
4332
4333	if (curr->sched_class != p->sched_class)
4334		goto out_unlock;
4335
4336	if (task_running(p_rq, p) || p->state)
4337		goto out_unlock;
4338
4339	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
4340	if (yielded) {
4341		schedstat_inc(rq, yld_count);
4342		/*
4343		 * Make p's CPU reschedule; pick_next_entity takes care of
4344		 * fairness.
4345		 */
4346		if (preempt && rq != p_rq)
4347			resched_curr(p_rq);
4348	}
4349
4350out_unlock:
4351	double_rq_unlock(rq, p_rq);
4352out_irq:
4353	local_irq_restore(flags);
4354
4355	if (yielded > 0)
4356		schedule();
4357
4358	return yielded;
4359}
4360EXPORT_SYMBOL_GPL(yield_to);
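
/*
 * Illustrative in-kernel sketch: the main in-tree user is KVM, which on
 * detecting a spinning vcpu boosts the task of the vcpu that likely
 * holds the lock. Per the comment above, the caller must hold a
 * reference on the (hypothetical) target task:
 *
 *	get_task_struct(target);
 *	yield_to(target, false);
 *	put_task_struct(target);
 */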
4361
4362/*
4363 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
4364 * that process accounting knows that this is a task in IO wait state.
4365 */
4366void __sched io_schedule(void)
4367{
4368	struct rq *rq = raw_rq();
4369
4370	delayacct_blkio_start();
4371	atomic_inc(&rq->nr_iowait);
4372	blk_flush_plug(current);
4373	current->in_iowait = 1;
4374	schedule();
4375	current->in_iowait = 0;
4376	atomic_dec(&rq->nr_iowait);
4377	delayacct_blkio_end();
4378}
4379EXPORT_SYMBOL(io_schedule);
4380
4381long __sched io_schedule_timeout(long timeout)
4382{
4383	struct rq *rq = raw_rq();
4384	long ret;
4385
4386	delayacct_blkio_start();
4387	atomic_inc(&rq->nr_iowait);
4388	blk_flush_plug(current);
4389	current->in_iowait = 1;
4390	ret = schedule_timeout(timeout);
4391	current->in_iowait = 0;
4392	atomic_dec(&rq->nr_iowait);
4393	delayacct_blkio_end();
4394	return ret;
4395}
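
/*
 * Illustrative in-kernel sketch (io_done() and req are hypothetical):
 * callers use these helpers instead of plain schedule() so the sleep is
 * accounted as iowait, typically in a classic wait loop:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (io_done(req))
 *			break;
 *		io_schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */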
4396
4397/**
4398 * sys_sched_get_priority_max - return maximum RT priority.
4399 * @policy: scheduling class.
4400 *
4401 * Return: On success, this syscall returns the maximum
4402 * rt_priority that can be used by a given scheduling class.
4403 * On failure, a negative error code is returned.
4404 */
4405SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
4406{
4407	int ret = -EINVAL;
4408
4409	switch (policy) {
4410	case SCHED_FIFO:
4411	case SCHED_RR:
4412		ret = MAX_USER_RT_PRIO-1;
4413		break;
4414	case SCHED_DEADLINE:
4415	case SCHED_NORMAL:
4416	case SCHED_BATCH:
4417	case SCHED_IDLE:
4418		ret = 0;
4419		break;
4420	}
4421	return ret;
4422}
4423
4424/**
4425 * sys_sched_get_priority_min - return minimum RT priority.
4426 * @policy: scheduling class.
4427 *
4428 * Return: On success, this syscall returns the minimum
4429 * rt_priority that can be used by a given scheduling class.
4430 * On failure, a negative error code is returned.
4431 */
4432SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
4433{
4434	int ret = -EINVAL;
4435
4436	switch (policy) {
4437	case SCHED_FIFO:
4438	case SCHED_RR:
4439		ret = 1;
4440		break;
4441	case SCHED_DEADLINE:
4442	case SCHED_NORMAL:
4443	case SCHED_BATCH:
4444	case SCHED_IDLE:
4445		ret = 0;
4446	}
4447	return ret;
4448}
4449
4450/**
4451 * sys_sched_rr_get_interval - return the default timeslice of a process.
4452 * @pid: pid of the process.
4453 * @interval: userspace pointer to the timeslice value.
4454 *
4455 * This syscall writes the default timeslice value of a given process
4456 * into the user-space timespec buffer. A value of '0' means infinity.
4457 *
4458 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
4459 * an error code.
4460 */
4461SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
4462		struct timespec __user *, interval)
4463{
4464	struct task_struct *p;
4465	unsigned int time_slice;
4466	unsigned long flags;
4467	struct rq *rq;
4468	int retval;
4469	struct timespec t;
4470
4471	if (pid < 0)
4472		return -EINVAL;
4473
4474	retval = -ESRCH;
4475	rcu_read_lock();
4476	p = find_process_by_pid(pid);
4477	if (!p)
4478		goto out_unlock;
4479
4480	retval = security_task_getscheduler(p);
4481	if (retval)
4482		goto out_unlock;
4483
4484	rq = task_rq_lock(p, &flags);
4485	time_slice = 0;
4486	if (p->sched_class->get_rr_interval)
4487		time_slice = p->sched_class->get_rr_interval(rq, p);
4488	task_rq_unlock(rq, p, &flags);
4489
4490	rcu_read_unlock();
4491	jiffies_to_timespec(time_slice, &t);
4492	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
4493	return retval;
4494
4495out_unlock:
4496	rcu_read_unlock();
4497	return retval;
4498}
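
/*
 * Illustrative userspace sketch (not kernel code), via the glibc
 * wrapper for this syscall:
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("timeslice: %ld.%09ld s\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 */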
4499
4500static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
4501
4502void sched_show_task(struct task_struct *p)
4503{
4504	unsigned long free = 0;
4505	int ppid;
4506	unsigned state;
4507
4508	state = p->state ? __ffs(p->state) + 1 : 0;
4509	printk(KERN_INFO "%-15.15s %c", p->comm,
4510		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4511#if BITS_PER_LONG == 32
4512	if (state == TASK_RUNNING)
4513		printk(KERN_CONT " running  ");
4514	else
4515		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
4516#else
4517	if (state == TASK_RUNNING)
4518		printk(KERN_CONT "  running task    ");
4519	else
4520		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
4521#endif
4522#ifdef CONFIG_DEBUG_STACK_USAGE
4523	free = stack_not_used(p);
4524#endif
4525	rcu_read_lock();
4526	ppid = task_pid_nr(rcu_dereference(p->real_parent));
4527	rcu_read_unlock();
4528	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
4529		task_pid_nr(p), ppid,
4530		(unsigned long)task_thread_info(p)->flags);
4531
4532	print_worker_info(KERN_INFO, p);
4533	show_stack(p, NULL);
4534}
4535
4536void show_state_filter(unsigned long state_filter)
4537{
4538	struct task_struct *g, *p;
4539
4540#if BITS_PER_LONG == 32
4541	printk(KERN_INFO
4542		"  task                PC stack   pid father\n");
4543#else
4544	printk(KERN_INFO
4545		"  task                        PC stack   pid father\n");
4546#endif
4547	rcu_read_lock();
4548	for_each_process_thread(g, p) {
4549		/*
4550		 * reset the NMI-timeout; listing all tasks on a slow
4551		 * console might take a lot of time:
4552		 */
4553		touch_nmi_watchdog();
4554		if (!state_filter || (p->state & state_filter))
4555			sched_show_task(p);
4556	}
4557
4558	touch_all_softlockup_watchdogs();
4559
4560#ifdef CONFIG_SCHED_DEBUG
4561	sysrq_sched_debug_show();
4562#endif
4563	rcu_read_unlock();
4564	/*
4565	 * Only show locks if all tasks are dumped:
4566	 */
4567	if (!state_filter)
4568		debug_show_all_locks();
4569}
4570
4571void init_idle_bootup_task(struct task_struct *idle)
4572{
4573	idle->sched_class = &idle_sched_class;
4574}
4575
4576/**
4577 * init_idle - set up an idle thread for a given CPU
4578 * @idle: task in question
4579 * @cpu: cpu the idle task belongs to
4580 *
4581 * NOTE: this function does not set the idle thread's NEED_RESCHED
4582 * flag, to make booting more robust.
4583 */
4584void init_idle(struct task_struct *idle, int cpu)
4585{
4586	struct rq *rq = cpu_rq(cpu);
4587	unsigned long flags;
4588
4589	raw_spin_lock_irqsave(&rq->lock, flags);
4590
4591	__sched_fork(0, idle);
4592	idle->state = TASK_RUNNING;
4593	idle->se.exec_start = sched_clock();
4594
4595	do_set_cpus_allowed(idle, cpumask_of(cpu));
4596	/*
4597	 * We have a chicken-and-egg problem: even though we are
4598	 * holding rq->lock, the cpu isn't yet set to this cpu so the
4599	 * lockdep check in task_group() will fail.
4600	 *
4601	 * Similar to the sched_fork() case; alternatively we could
4602	 * use task_rq_lock() here and obtain the other rq->lock.
4603	 *
4604	 * Silence PROVE_RCU
4605	 */
4606	rcu_read_lock();
4607	__set_task_cpu(idle, cpu);
4608	rcu_read_unlock();
4609
4610	rq->curr = rq->idle = idle;
4611	idle->on_rq = TASK_ON_RQ_QUEUED;
4612#if defined(CONFIG_SMP)
4613	idle->on_cpu = 1;
4614#endif
4615	raw_spin_unlock_irqrestore(&rq->lock, flags);
4616
4617	/* Set the preempt count _outside_ the spinlocks! */
4618	init_idle_preempt_count(idle, cpu);
4619
4620	/*
4621	 * The idle tasks have their own, simple scheduling class:
4622	 */
4623	idle->sched_class = &idle_sched_class;
4624	ftrace_graph_init_idle_task(idle, cpu);
4625	vtime_init_idle(idle, cpu);
4626#if defined(CONFIG_SMP)
4627	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
4628#endif
4629}
4630
4631#ifdef CONFIG_SMP
4632/*
4633 * move_queued_task - move a queued task to new rq.
4634 *
4635 * Returns (locked) new rq. Old rq's lock is released.
4636 */
4637static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
4638{
4639	struct rq *rq = task_rq(p);
4640
4641	lockdep_assert_held(&rq->lock);
4642
4643	dequeue_task(rq, p, 0);
4644	p->on_rq = TASK_ON_RQ_MIGRATING;
4645	set_task_cpu(p, new_cpu);
4646	raw_spin_unlock(&rq->lock);
4647
4648	rq = cpu_rq(new_cpu);
4649
4650	raw_spin_lock(&rq->lock);
4651	BUG_ON(task_cpu(p) != new_cpu);
4652	p->on_rq = TASK_ON_RQ_QUEUED;
4653	enqueue_task(rq, p, 0);
4654	check_preempt_curr(rq, p, 0);
4655
4656	return rq;
4657}
4658
4659void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
4660{
4661	if (p->sched_class && p->sched_class->set_cpus_allowed)
4662		p->sched_class->set_cpus_allowed(p, new_mask);
4663
4664	cpumask_copy(&p->cpus_allowed, new_mask);
4665	p->nr_cpus_allowed = cpumask_weight(new_mask);
4666}
4667
4668/*
4669 * This is how migration works:
4670 *
4671 * 1) we invoke migration_cpu_stop() on the target CPU using
4672 *    stop_one_cpu().
4673 * 2) stopper starts to run (implicitly forcing the migrated thread
4674 *    off the CPU)
4675 * 3) it checks whether the migrated task is still in the wrong runqueue.
4676 * 4) if it's in the wrong runqueue then the migration thread removes
4677 *    it and puts it into the right queue.
4678 * 5) stopper completes and stop_one_cpu() returns and the migration
4679 *    is done.
4680 */
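
/*
 * Sketch of the common entry point (see set_cpus_allowed_ptr() below),
 * which performs steps 1-2; migration_cpu_stop() then handles steps 3-5
 * via __migrate_task():
 *
 *	struct migration_arg arg = { p, dest_cpu };
 *
 *	stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 */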
4681
4682/*
4683 * Change a given task's CPU affinity. Migrate the thread to a
4684 * proper CPU and schedule it away if the CPU it's executing on
4685 * is removed from the allowed bitmask.
4686 *
4687 * NOTE: the caller must have a valid reference to the task, the
4688 * task must not exit() & deallocate itself prematurely. The
4689 * call is not atomic; no spinlocks may be held.
4690 */
4691int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
4692{
4693	unsigned long flags;
4694	struct rq *rq;
4695	unsigned int dest_cpu;
4696	int ret = 0;
4697
4698	rq = task_rq_lock(p, &flags);
4699
4700	if (cpumask_equal(&p->cpus_allowed, new_mask))
4701		goto out;
4702
4703	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
4704		ret = -EINVAL;
4705		goto out;
4706	}
4707
4708	do_set_cpus_allowed(p, new_mask);
4709
4710	/* Can the task run on the task's current CPU? If so, we're done */
4711	if (cpumask_test_cpu(task_cpu(p), new_mask))
4712		goto out;
4713
4714	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
4715	if (task_running(rq, p) || p->state == TASK_WAKING) {
4716		struct migration_arg arg = { p, dest_cpu };
4717		/* Need help from migration thread: drop lock and wait. */
4718		task_rq_unlock(rq, p, &flags);
4719		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
4720		tlb_migrate_finish(p->mm);
4721		return 0;
4722	} else if (task_on_rq_queued(p))
4723		rq = move_queued_task(p, dest_cpu);
4724out:
4725	task_rq_unlock(rq, p, &flags);
4726
4727	return ret;
4728}
4729EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
4730
4731/*
4732 * Move a (non-current) task off this cpu, onto the dest cpu. We do this
4733 * either because the task can't run here any more (set_cpus_allowed()
4734 * moved it away from this CPU, or the CPU is going down), or because we're
4735 * attempting to rebalance this task on exec (sched_exec).
4736 *
4737 * So we race with normal scheduler movements, but that's OK, as long
4738 * as the task is no longer on this CPU.
4739 *
4740 * Returns non-zero if task was successfully migrated.
4741 */
4742static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
4743{
4744	struct rq *rq;
4745	int ret = 0;
4746
4747	if (unlikely(!cpu_active(dest_cpu)))
4748		return ret;
4749
4750	rq = cpu_rq(src_cpu);
4751
4752	raw_spin_lock(&p->pi_lock);
4753	raw_spin_lock(&rq->lock);
4754	/* Already moved. */
4755	if (task_cpu(p) != src_cpu)
4756		goto done;
4757
4758	/* Affinity changed (again). */
4759	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
4760		goto fail;
4761
4762	/*
4763	 * If we're not on a rq, the next wake-up will ensure we're
4764	 * placed properly.
4765	 */
4766	if (task_on_rq_queued(p))
4767		rq = move_queued_task(p, dest_cpu);
4768done:
4769	ret = 1;
4770fail:
4771	raw_spin_unlock(&rq->lock);
4772	raw_spin_unlock(&p->pi_lock);
4773	return ret;
4774}
4775
4776#ifdef CONFIG_NUMA_BALANCING
4777/* Migrate current task p to target_cpu */
4778int migrate_task_to(struct task_struct *p, int target_cpu)
4779{
4780	struct migration_arg arg = { p, target_cpu };
4781	int curr_cpu = task_cpu(p);
4782
4783	if (curr_cpu == target_cpu)
4784		return 0;
4785
4786	if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
4787		return -EINVAL;
4788
4789	/* TODO: This is not properly updating schedstats */
4790
4791	trace_sched_move_numa(p, curr_cpu, target_cpu);
4792	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
4793}
4794
4795/*
4796 * Requeue a task on a given node and accurately track the number of NUMA
4797 * tasks on the runqueues
4798 */
4799void sched_setnuma(struct task_struct *p, int nid)
4800{
4801	struct rq *rq;
4802	unsigned long flags;
4803	bool queued, running;
4804
4805	rq = task_rq_lock(p, &flags);
4806	queued = task_on_rq_queued(p);
4807	running = task_current(rq, p);
4808
4809	if (queued)
4810		dequeue_task(rq, p, 0);
4811	if (running)
4812		put_prev_task(rq, p);
4813
4814	p->numa_preferred_nid = nid;
4815
4816	if (running)
4817		p->sched_class->set_curr_task(rq);
4818	if (queued)
4819		enqueue_task(rq, p, 0);
4820	task_rq_unlock(rq, p, &flags);
4821}
4822#endif
4823
4824/*
4825 * migration_cpu_stop - this will be executed by a highprio stopper thread
4826 * and performs thread migration by bumping the thread off its CPU and then
4827 * 'pushing' it onto another runqueue.
4828 */
4829static int migration_cpu_stop(void *data)
4830{
4831	struct migration_arg *arg = data;
4832
4833	/*
4834	 * The original target cpu might have gone down and we might
4835	 * be on another cpu but it doesn't matter.
4836	 */
4837	local_irq_disable();
4838	/*
4839	 * We need to explicitly wake pending tasks before running
4840	 * __migrate_task() such that we will not miss enforcing cpus_allowed
4841	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
4842	 */
4843	sched_ttwu_pending();
4844	__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
4845	local_irq_enable();
4846	return 0;
4847}
4848
4849#ifdef CONFIG_HOTPLUG_CPU
4850
4851/*
4852 * Ensures that the idle task is using init_mm right before its cpu goes
4853 * offline.
4854 */
4855void idle_task_exit(void)
4856{
4857	struct mm_struct *mm = current->active_mm;
4858
4859	BUG_ON(cpu_online(smp_processor_id()));
4860
4861	if (mm != &init_mm) {
4862		switch_mm(mm, &init_mm, current);
4863		finish_arch_post_lock_switch();
4864	}
4865	mmdrop(mm);
4866}
4867
4868/*
4869 * Since this CPU is going 'away' for a while, fold any nr_active delta
4870 * we might have. Assumes we're called after migrate_tasks() so that the
4871 * nr_active count is stable.
4872 *
4873 * Also see the comment "Global load-average calculations".
4874 */
4875static void calc_load_migrate(struct rq *rq)
4876{
4877	long delta = calc_load_fold_active(rq);
4878	if (delta)
4879		atomic_long_add(delta, &calc_load_tasks);
4880}
4881
4882static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
4883{
4884}
4885
4886static const struct sched_class fake_sched_class = {
4887	.put_prev_task = put_prev_task_fake,
4888};
4889
4890static struct task_struct fake_task = {
4891	/*
4892	 * Avoid pull_{rt,dl}_task()
4893	 */
4894	.prio = MAX_PRIO + 1,
4895	.sched_class = &fake_sched_class,
4896};
4897
4898/*
4899 * Migrate all tasks from the rq; sleeping tasks will be migrated by
4900 * try_to_wake_up()->select_task_rq().
4901 *
4902 * Called with rq->lock held. Even though we're in stop_machine() and
4903 * there's no concurrency possible, we hold the required locks anyway
4904 * for the benefit of lock validation.
4905 */
4906static void migrate_tasks(unsigned int dead_cpu)
4907{
4908	struct rq *rq = cpu_rq(dead_cpu);
4909	struct task_struct *next, *stop = rq->stop;
4910	int dest_cpu;
4911
4912	/*
4913	 * Fudge the rq selection such that the below task selection loop
4914	 * doesn't get stuck on the currently eligible stop task.
4915	 *
4916	 * We're currently inside stop_machine() and the rq is either stuck
4917	 * in the stop_machine_cpu_stop() loop, or we're executing this code;
4918	 * either way we should never end up calling schedule() until we're
4919	 * done here.
4920	 */
4921	rq->stop = NULL;
4922
4923	/*
4924	 * put_prev_task() and pick_next_task() sched
4925	 * class method both need to have an up-to-date
4926	 * value of rq->clock[_task]
4927	 */
4928	update_rq_clock(rq);
4929
4930	for ( ; ; ) {
4931		/*
4932		 * There's this thread running, bail when that's the only
4933		 * remaining thread.
4934		 */
4935		if (rq->nr_running == 1)
4936			break;
4937
4938		next = pick_next_task(rq, &fake_task);
4939		BUG_ON(!next);
4940		next->sched_class->put_prev_task(rq, next);
4941
4942		/* Find suitable destination for @next, with force if needed. */
4943		dest_cpu = select_fallback_rq(dead_cpu, next);
4944		raw_spin_unlock(&rq->lock);
4945
4946		__migrate_task(next, dead_cpu, dest_cpu);
4947
4948		raw_spin_lock(&rq->lock);
4949	}
4950
4951	rq->stop = stop;
4952}
4953
4954#endif /* CONFIG_HOTPLUG_CPU */
4955
4956#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
4957
4958static struct ctl_table sd_ctl_dir[] = {
4959	{
4960		.procname	= "sched_domain",
4961		.mode		= 0555,
4962	},
4963	{}
4964};
4965
4966static struct ctl_table sd_ctl_root[] = {
4967	{
4968		.procname	= "kernel",
4969		.mode		= 0555,
4970		.child		= sd_ctl_dir,
4971	},
4972	{}
4973};
4974
4975static struct ctl_table *sd_alloc_ctl_entry(int n)
4976{
4977	struct ctl_table *entry =
4978		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
4979
4980	return entry;
4981}
4982
4983static void sd_free_ctl_entry(struct ctl_table **tablep)
4984{
4985	struct ctl_table *entry;
4986
4987	/*
4988	 * In the intermediate directories, both the child directory and
4989	 * procname are dynamically allocated and could fail but the mode
4990	 * will always be set. In the lowest directory the names are
4991	 * static strings and all have proc handlers.
4992	 */
4993	for (entry = *tablep; entry->mode; entry++) {
4994		if (entry->child)
4995			sd_free_ctl_entry(&entry->child);
4996		if (entry->proc_handler == NULL)
4997			kfree(entry->procname);
4998	}
4999
5000	kfree(*tablep);
5001	*tablep = NULL;
5002}
5003
5004static int min_load_idx = 0;
5005static int max_load_idx = CPU_LOAD_IDX_MAX-1;
5006
5007static void
5008set_table_entry(struct ctl_table *entry,
5009		const char *procname, void *data, int maxlen,
5010		umode_t mode, proc_handler *proc_handler,
5011		bool load_idx)
5012{
5013	entry->procname = procname;
5014	entry->data = data;
5015	entry->maxlen = maxlen;
5016	entry->mode = mode;
5017	entry->proc_handler = proc_handler;
5018
5019	if (load_idx) {
5020		entry->extra1 = &min_load_idx;
5021		entry->extra2 = &max_load_idx;
5022	}
5023}
5024
5025static struct ctl_table *
5026sd_alloc_ctl_domain_table(struct sched_domain *sd)
5027{
5028	struct ctl_table *table = sd_alloc_ctl_entry(14);
5029
5030	if (table == NULL)
5031		return NULL;
5032
5033	set_table_entry(&table[0], "min_interval", &sd->min_interval,
5034		sizeof(long), 0644, proc_doulongvec_minmax, false);
5035	set_table_entry(&table[1], "max_interval", &sd->max_interval,
5036		sizeof(long), 0644, proc_doulongvec_minmax, false);
5037	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
5038		sizeof(int), 0644, proc_dointvec_minmax, true);
5039	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
5040		sizeof(int), 0644, proc_dointvec_minmax, true);
5041	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
5042		sizeof(int), 0644, proc_dointvec_minmax, true);
5043	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
5044		sizeof(int), 0644, proc_dointvec_minmax, true);
5045	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
5046		sizeof(int), 0644, proc_dointvec_minmax, true);
5047	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
5048		sizeof(int), 0644, proc_dointvec_minmax, false);
5049	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
5050		sizeof(int), 0644, proc_dointvec_minmax, false);
5051	set_table_entry(&table[9], "cache_nice_tries",
5052		&sd->cache_nice_tries,
5053		sizeof(int), 0644, proc_dointvec_minmax, false);
5054	set_table_entry(&table[10], "flags", &sd->flags,
5055		sizeof(int), 0644, proc_dointvec_minmax, false);
5056	set_table_entry(&table[11], "max_newidle_lb_cost",
5057		&sd->max_newidle_lb_cost,
5058		sizeof(long), 0644, proc_doulongvec_minmax, false);
5059	set_table_entry(&table[12], "name", sd->name,
5060		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
5061	/* &table[13] is terminator */
5062
5063	return table;
5064}
5065
5066static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
5067{
5068	struct ctl_table *entry, *table;
5069	struct sched_domain *sd;
5070	int domain_num = 0, i;
5071	char buf[32];
5072
5073	for_each_domain(cpu, sd)
5074		domain_num++;
5075	entry = table = sd_alloc_ctl_entry(domain_num + 1);
5076	if (table == NULL)
5077		return NULL;
5078
5079	i = 0;
5080	for_each_domain(cpu, sd) {
5081		snprintf(buf, 32, "domain%d", i);
5082		entry->procname = kstrdup(buf, GFP_KERNEL);
5083		entry->mode = 0555;
5084		entry->child = sd_alloc_ctl_domain_table(sd);
5085		entry++;
5086		i++;
5087	}
5088	return table;
5089}
5090
5091static struct ctl_table_header *sd_sysctl_header;
5092static void register_sched_domain_sysctl(void)
5093{
5094	int i, cpu_num = num_possible_cpus();
5095	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
5096	char buf[32];
5097
5098	WARN_ON(sd_ctl_dir[0].child);
5099	sd_ctl_dir[0].child = entry;
5100
5101	if (entry == NULL)
5102		return;
5103
5104	for_each_possible_cpu(i) {
5105		snprintf(buf, 32, "cpu%d", i);
5106		entry->procname = kstrdup(buf, GFP_KERNEL);
5107		entry->mode = 0555;
5108		entry->child = sd_alloc_ctl_cpu_table(i);
5109		entry++;
5110	}
5111
5112	WARN_ON(sd_sysctl_header);
5113	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
5114}
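
/*
 * The resulting tree looks like this (illustrative; the set of domains
 * depends on the machine's topology):
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/max_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain1/...
 *	/proc/sys/kernel/sched_domain/cpu1/...
 */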
5115
5116/* may be called multiple times per register */
5117static void unregister_sched_domain_sysctl(void)
5118{
5119	if (sd_sysctl_header)
5120		unregister_sysctl_table(sd_sysctl_header);
5121	sd_sysctl_header = NULL;
5122	if (sd_ctl_dir[0].child)
5123		sd_free_ctl_entry(&sd_ctl_dir[0].child);
5124}
5125#else
5126static void register_sched_domain_sysctl(void)
5127{
5128}
5129static void unregister_sched_domain_sysctl(void)
5130{
5131}
5132#endif
5133
5134static void set_rq_online(struct rq *rq)
5135{
5136	if (!rq->online) {
5137		const struct sched_class *class;
5138
5139		cpumask_set_cpu(rq->cpu, rq->rd->online);
5140		rq->online = 1;
5141
5142		for_each_class(class) {
5143			if (class->rq_online)
5144				class->rq_online(rq);
5145		}
5146	}
5147}
5148
5149static void set_rq_offline(struct rq *rq)
5150{
5151	if (rq->online) {
5152		const struct sched_class *class;
5153
5154		for_each_class(class) {
5155			if (class->rq_offline)
5156				class->rq_offline(rq);
5157		}
5158
5159		cpumask_clear_cpu(rq->cpu, rq->rd->online);
5160		rq->online = 0;
5161	}
5162}
5163
5164/*
5165 * migration_call - callback that gets triggered when a CPU is added.
5166 * Here we can start up the necessary migration thread for the new CPU.
5167 */
5168static int
5169migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5170{
5171	int cpu = (long)hcpu;
5172	unsigned long flags;
5173	struct rq *rq = cpu_rq(cpu);
5174
5175	switch (action & ~CPU_TASKS_FROZEN) {
5176
5177	case CPU_UP_PREPARE:
5178		rq->calc_load_update = calc_load_update;
5179		break;
5180
5181	case CPU_ONLINE:
5182		/* Update our root-domain */
5183		raw_spin_lock_irqsave(&rq->lock, flags);
5184		if (rq->rd) {
5185			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5186
5187			set_rq_online(rq);
5188		}
5189		raw_spin_unlock_irqrestore(&rq->lock, flags);
5190		break;
5191
5192#ifdef CONFIG_HOTPLUG_CPU
5193	case CPU_DYING:
5194		sched_ttwu_pending();
5195		/* Update our root-domain */
5196		raw_spin_lock_irqsave(&rq->lock, flags);
5197		if (rq->rd) {
5198			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5199			set_rq_offline(rq);
5200		}
5201		migrate_tasks(cpu);
5202		BUG_ON(rq->nr_running != 1); /* the migration thread */
5203		raw_spin_unlock_irqrestore(&rq->lock, flags);
5204		break;
5205
5206	case CPU_DEAD:
5207		calc_load_migrate(rq);
5208		break;
5209#endif
5210	}
5211
5212	update_max_interval();
5213
5214	return NOTIFY_OK;
5215}
5216
5217/*
5218 * Register at high priority so that task migration (migrate_all_tasks)
5219 * happens before everything else.  This has to be lower priority than
5220 * the notifier in the perf_event subsystem, though.
5221 */
5222static struct notifier_block migration_notifier = {
5223	.notifier_call = migration_call,
5224	.priority = CPU_PRI_MIGRATION,
5225};
5226
5227static void __cpuinit set_cpu_rq_start_time(void)
5228{
5229	int cpu = smp_processor_id();
5230	struct rq *rq = cpu_rq(cpu);
5231	rq->age_stamp = sched_clock_cpu(cpu);
5232}
5233
5234static int sched_cpu_active(struct notifier_block *nfb,
5235				      unsigned long action, void *hcpu)
5236{
5237	switch (action & ~CPU_TASKS_FROZEN) {
5238	case CPU_STARTING:
5239		set_cpu_rq_start_time();
5240		return NOTIFY_OK;
5241	case CPU_DOWN_FAILED:
5242		set_cpu_active((long)hcpu, true);
5243		return NOTIFY_OK;
5244	default:
5245		return NOTIFY_DONE;
5246	}
5247}
5248
5249static int sched_cpu_inactive(struct notifier_block *nfb,
5250					unsigned long action, void *hcpu)
5251{
5252	unsigned long flags;
5253	long cpu = (long)hcpu;
5254
5255	switch (action & ~CPU_TASKS_FROZEN) {
5256	case CPU_DOWN_PREPARE:
5257		set_cpu_active(cpu, false);
5258
5259		/* explicitly allow suspend */
5260		if (!(action & CPU_TASKS_FROZEN)) {
5261			struct dl_bw *dl_b = dl_bw_of(cpu);
5262			bool overflow;
5263			int cpus;
5264
5265			raw_spin_lock_irqsave(&dl_b->lock, flags);
5266			cpus = dl_bw_cpus(cpu);
5267			overflow = __dl_overflow(dl_b, cpus, 0, 0);
5268			raw_spin_unlock_irqrestore(&dl_b->lock, flags);
5269
5270			if (overflow)
5271				return notifier_from_errno(-EBUSY);
5272		}
5273		return NOTIFY_OK;
5274	}
5275
5276	return NOTIFY_DONE;
5277}
5278
5279static int __init migration_init(void)
5280{
5281	void *cpu = (void *)(long)smp_processor_id();
5282	int err;
5283
5284	/* Initialize migration for the boot CPU */
5285	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5286	BUG_ON(err == NOTIFY_BAD);
5287	migration_call(&migration_notifier, CPU_ONLINE, cpu);
5288	register_cpu_notifier(&migration_notifier);
5289
5290	/* Register cpu active notifiers */
5291	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
5292	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
5293
5294	return 0;
5295}
5296early_initcall(migration_init);
5297#endif
5298
5299#ifdef CONFIG_SMP
5300
5301static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5302
5303#ifdef CONFIG_SCHED_DEBUG
5304
5305static __read_mostly int sched_debug_enabled;
5306
5307static int __init sched_debug_setup(char *str)
5308{
5309	sched_debug_enabled = 1;
5310
5311	return 0;
5312}
5313early_param("sched_debug", sched_debug_setup);
5314
5315static inline bool sched_debug(void)
5316{
5317	return sched_debug_enabled;
5318}
5319
5320static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
5321				  struct cpumask *groupmask)
5322{
5323	struct sched_group *group = sd->groups;
5324	char str[256];
5325
5326	cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
5327	cpumask_clear(groupmask);
5328
5329	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5330
5331	if (!(sd->flags & SD_LOAD_BALANCE)) {
5332		printk("does not load-balance\n");
5333		if (sd->parent)
5334			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5335					" has parent");
5336		return -1;
5337	}
5338
5339	printk(KERN_CONT "span %s level %s\n", str, sd->name);
5340
5341	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
5342		printk(KERN_ERR "ERROR: domain->span does not contain "
5343				"CPU%d\n", cpu);
5344	}
5345	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
5346		printk(KERN_ERR "ERROR: domain->groups does not contain"
5347				" CPU%d\n", cpu);
5348	}
5349
5350	printk(KERN_DEBUG "%*s groups:", level + 1, "");
5351	do {
5352		if (!group) {
5353			printk("\n");
5354			printk(KERN_ERR "ERROR: group is NULL\n");
5355			break;
5356		}
5357
5358		/*
5359		 * Even though we initialize ->capacity to something semi-sane,
5360		 * we leave capacity_orig unset. This allows us to detect if
5361		 * domain iteration is still funny without causing /0 traps.
5362		 */
5363		if (!group->sgc->capacity_orig) {
5364			printk(KERN_CONT "\n");
5365			printk(KERN_ERR "ERROR: domain->cpu_capacity not set\n");
5366			break;
5367		}
5368
5369		if (!cpumask_weight(sched_group_cpus(group))) {
5370			printk(KERN_CONT "\n");
5371			printk(KERN_ERR "ERROR: empty group\n");
5372			break;
5373		}
5374
5375		if (!(sd->flags & SD_OVERLAP) &&
5376		    cpumask_intersects(groupmask, sched_group_cpus(group))) {
5377			printk(KERN_CONT "\n");
5378			printk(KERN_ERR "ERROR: repeated CPUs\n");
5379			break;
5380		}
5381
5382		cpumask_or(groupmask, groupmask, sched_group_cpus(group));
5383
5384		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
5385
5386		printk(KERN_CONT " %s", str);
5387		if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
5388			printk(KERN_CONT " (cpu_capacity = %d)",
5389				group->sgc->capacity);
5390		}
5391
5392		group = group->next;
5393	} while (group != sd->groups);
5394	printk(KERN_CONT "\n");
5395
5396	if (!cpumask_equal(sched_domain_span(sd), groupmask))
5397		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
5398
5399	if (sd->parent &&
5400	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
5401		printk(KERN_ERR "ERROR: parent span is not a superset "
5402			"of domain->span\n");
5403	return 0;
5404}
5405
5406static void sched_domain_debug(struct sched_domain *sd, int cpu)
5407{
5408	int level = 0;
5409
5410	if (!sched_debug_enabled)
5411		return;
5412
5413	if (!sd) {
5414		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5415		return;
5416	}
5417
5418	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5419
5420	for (;;) {
5421		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
5422			break;
5423		level++;
5424		sd = sd->parent;
5425		if (!sd)
5426			break;
5427	}
5428}
5429#else /* !CONFIG_SCHED_DEBUG */
5430# define sched_domain_debug(sd, cpu) do { } while (0)
5431static inline bool sched_debug(void)
5432{
5433	return false;
5434}
5435#endif /* CONFIG_SCHED_DEBUG */
5436
5437static int sd_degenerate(struct sched_domain *sd)
5438{
5439	if (cpumask_weight(sched_domain_span(sd)) == 1)
5440		return 1;
5441
5442	/* Following flags need at least 2 groups */
5443	if (sd->flags & (SD_LOAD_BALANCE |
5444			 SD_BALANCE_NEWIDLE |
5445			 SD_BALANCE_FORK |
5446			 SD_BALANCE_EXEC |
5447			 SD_SHARE_CPUCAPACITY |
5448			 SD_SHARE_PKG_RESOURCES |
5449			 SD_SHARE_POWERDOMAIN)) {
5450		if (sd->groups != sd->groups->next)
5451			return 0;
5452	}
5453
5454	/* Following flags don't use groups */
5455	if (sd->flags & (SD_WAKE_AFFINE))
5456		return 0;
5457
5458	return 1;
5459}
5460
5461static int
5462sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
5463{
5464	unsigned long cflags = sd->flags, pflags = parent->flags;
5465
5466	if (sd_degenerate(parent))
5467		return 1;
5468
5469	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
5470		return 0;
5471
5472	/* Flags needing groups don't count if only 1 group in parent */
5473	if (parent->groups == parent->groups->next) {
5474		pflags &= ~(SD_LOAD_BALANCE |
5475				SD_BALANCE_NEWIDLE |
5476				SD_BALANCE_FORK |
5477				SD_BALANCE_EXEC |
5478				SD_SHARE_CPUCAPACITY |
5479				SD_SHARE_PKG_RESOURCES |
5480				SD_PREFER_SIBLING |
5481				SD_SHARE_POWERDOMAIN);
5482		if (nr_node_ids == 1)
5483			pflags &= ~SD_SERIALIZE;
5484	}
5485	if (~cflags & pflags)
5486		return 0;
5487
5488	return 1;
5489}
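
/*
 * Worked example for the final flags test above: with cflags containing
 * only SD_WAKE_AFFINE and pflags containing SD_WAKE_AFFINE |
 * SD_SERIALIZE, ~cflags & pflags leaves SD_SERIALIZE set, so the parent
 * provides behaviour the child lacks and is not degenerate.
 */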
5490
5491static void free_rootdomain(struct rcu_head *rcu)
5492{
5493	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
5494
5495	cpupri_cleanup(&rd->cpupri);
5496	cpudl_cleanup(&rd->cpudl);
5497	free_cpumask_var(rd->dlo_mask);
5498	free_cpumask_var(rd->rto_mask);
5499	free_cpumask_var(rd->online);
5500	free_cpumask_var(rd->span);
5501	kfree(rd);
5502}
5503
5504static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5505{
5506	struct root_domain *old_rd = NULL;
5507	unsigned long flags;
5508
5509	raw_spin_lock_irqsave(&rq->lock, flags);
5510
5511	if (rq->rd) {
5512		old_rd = rq->rd;
5513
5514		if (cpumask_test_cpu(rq->cpu, old_rd->online))
5515			set_rq_offline(rq);
5516
5517		cpumask_clear_cpu(rq->cpu, old_rd->span);
5518
5519		/*
5520		 * If we don't want to free the old_rd yet then
5521		 * set old_rd to NULL to skip the freeing later
5522		 * in this function:
5523		 */
5524		if (!atomic_dec_and_test(&old_rd->refcount))
5525			old_rd = NULL;
5526	}
5527
5528	atomic_inc(&rd->refcount);
5529	rq->rd = rd;
5530
5531	cpumask_set_cpu(rq->cpu, rd->span);
5532	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
5533		set_rq_online(rq);
5534
5535	raw_spin_unlock_irqrestore(&rq->lock, flags);
5536
5537	if (old_rd)
5538		call_rcu_sched(&old_rd->rcu, free_rootdomain);
5539}
5540
5541static int init_rootdomain(struct root_domain *rd)
5542{
5543	memset(rd, 0, sizeof(*rd));
5544
5545	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
5546		goto out;
5547	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
5548		goto free_span;
5549	if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
5550		goto free_online;
5551	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
5552		goto free_dlo_mask;
5553
5554	init_dl_bw(&rd->dl_bw);
5555	if (cpudl_init(&rd->cpudl) != 0)
5556		goto free_dlo_mask;
5557
5558	if (cpupri_init(&rd->cpupri) != 0)
5559		goto free_rto_mask;
5560	return 0;
5561
5562free_rto_mask:
5563	free_cpumask_var(rd->rto_mask);
5564free_dlo_mask:
5565	free_cpumask_var(rd->dlo_mask);
5566free_online:
5567	free_cpumask_var(rd->online);
5568free_span:
5569	free_cpumask_var(rd->span);
5570out:
5571	return -ENOMEM;
5572}
5573
5574/*
5575 * By default the system creates a single root-domain with all cpus as
5576 * members (mimicking the global state we have today).
5577 */
5578struct root_domain def_root_domain;
5579
5580static void init_defrootdomain(void)
5581{
5582	init_rootdomain(&def_root_domain);
5583
5584	atomic_set(&def_root_domain.refcount, 1);
5585}
5586
5587static struct root_domain *alloc_rootdomain(void)
5588{
5589	struct root_domain *rd;
5590
5591	rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5592	if (!rd)
5593		return NULL;
5594
5595	if (init_rootdomain(rd) != 0) {
5596		kfree(rd);
5597		return NULL;
5598	}
5599
5600	return rd;
5601}
5602
5603static void free_sched_groups(struct sched_group *sg, int free_sgc)
5604{
5605	struct sched_group *tmp, *first;
5606
5607	if (!sg)
5608		return;
5609
5610	first = sg;
5611	do {
5612		tmp = sg->next;
5613
5614		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
5615			kfree(sg->sgc);
5616
5617		kfree(sg);
5618		sg = tmp;
5619	} while (sg != first);
5620}
5621
5622static void free_sched_domain(struct rcu_head *rcu)
5623{
5624	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
5625
5626	/*
5627	 * If it's an overlapping domain it has private groups; iterate and
5628	 * nuke them all.
5629	 */
5630	if (sd->flags & SD_OVERLAP) {
5631		free_sched_groups(sd->groups, 1);
5632	} else if (atomic_dec_and_test(&sd->groups->ref)) {
5633		kfree(sd->groups->sgc);
5634		kfree(sd->groups);
5635	}
5636	kfree(sd);
5637}
5638
5639static void destroy_sched_domain(struct sched_domain *sd, int cpu)
5640{
5641	call_rcu(&sd->rcu, free_sched_domain);
5642}
5643
5644static void destroy_sched_domains(struct sched_domain *sd, int cpu)
5645{
5646	for (; sd; sd = sd->parent)
5647		destroy_sched_domain(sd, cpu);
5648}
5649
5650/*
5651 * Keep a special pointer to the highest sched_domain that has
5652 * SD_SHARE_PKG_RESOURCES set (the Last Level Cache domain); this
5653 * allows us to avoid some pointer chasing in select_idle_sibling().
5654 *
5655 * Also keep a unique ID per domain (we use the first cpu number in
5656 * the cpumask of the domain), this allows us to quickly tell if
5657 * two cpus are in the same cache domain, see cpus_share_cache().
5658 */
5659DEFINE_PER_CPU(struct sched_domain *, sd_llc);
5660DEFINE_PER_CPU(int, sd_llc_size);
5661DEFINE_PER_CPU(int, sd_llc_id);
5662DEFINE_PER_CPU(struct sched_domain *, sd_numa);
5663DEFINE_PER_CPU(struct sched_domain *, sd_busy);
5664DEFINE_PER_CPU(struct sched_domain *, sd_asym);
5665
5666static void update_top_cache_domain(int cpu)
5667{
5668	struct sched_domain *sd;
5669	struct sched_domain *busy_sd = NULL;
5670	int id = cpu;
5671	int size = 1;
5672
5673	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
5674	if (sd) {
5675		id = cpumask_first(sched_domain_span(sd));
5676		size = cpumask_weight(sched_domain_span(sd));
5677		busy_sd = sd->parent; /* sd_busy */
5678	}
5679	rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
5680
5681	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
5682	per_cpu(sd_llc_size, cpu) = size;
5683	per_cpu(sd_llc_id, cpu) = id;
5684
5685	sd = lowest_flag_domain(cpu, SD_NUMA);
5686	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
5687
5688	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
5689	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
5690}
5691
5692/*
5693 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
5694 * hold the hotplug lock.
5695 */
5696static void
5697cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
5698{
5699	struct rq *rq = cpu_rq(cpu);
5700	struct sched_domain *tmp;
5701
5702	/* Remove the sched domains which do not contribute to scheduling. */
5703	for (tmp = sd; tmp; ) {
5704		struct sched_domain *parent = tmp->parent;
5705		if (!parent)
5706			break;
5707
5708		if (sd_parent_degenerate(tmp, parent)) {
5709			tmp->parent = parent->parent;
5710			if (parent->parent)
5711				parent->parent->child = tmp;
5712			/*
5713			 * Transfer SD_PREFER_SIBLING down in case of a
5714			 * degenerate parent; the spans match in this
5715			 * case, so the property transfers.
5716			 */
5717			if (parent->flags & SD_PREFER_SIBLING)
5718				tmp->flags |= SD_PREFER_SIBLING;
5719			destroy_sched_domain(parent, cpu);
5720		} else
5721			tmp = tmp->parent;
5722	}
5723
5724	if (sd && sd_degenerate(sd)) {
5725		tmp = sd;
5726		sd = sd->parent;
5727		destroy_sched_domain(tmp, cpu);
5728		if (sd)
5729			sd->child = NULL;
5730	}
5731
5732	sched_domain_debug(sd, cpu);
5733
5734	rq_attach_root(rq, rd);
5735	tmp = rq->sd;
5736	rcu_assign_pointer(rq->sd, sd);
5737	destroy_sched_domains(tmp, cpu);
5738
5739	update_top_cache_domain(cpu);
5740}
5741
5742/* cpus with isolated domains */
5743static cpumask_var_t cpu_isolated_map;
5744
5745/* Set up the mask of cpus configured for isolated domains */
5746static int __init isolated_cpu_setup(char *str)
5747{
5748	alloc_bootmem_cpumask_var(&cpu_isolated_map);
5749	cpulist_parse(str, cpu_isolated_map);
5750	return 1;
5751}
5752
5753__setup("isolcpus=", isolated_cpu_setup);
5754
5755struct s_data {
5756	struct sched_domain ** __percpu sd;
5757	struct root_domain	*rd;
5758};
5759
5760enum s_alloc {
5761	sa_rootdomain,
5762	sa_sd,
5763	sa_sd_storage,
5764	sa_none,
5765};
5766
5767/*
5768 * Build an iteration mask that can exclude certain CPUs from the upwards
5769 * domain traversal.
5770 *
5771 * Asymmetric node setups can result in situations where the domain tree is of
5772 * unequal depth; make sure to skip domains that already cover the entire
5773 * range.
5774 *
5775 * In that case build_sched_domains() will have terminated the iteration early
5776 * and our sibling sd spans will be empty. Domains should always include the
5777 * cpu they're built on, so check that.
5778 *
5779 */
5780static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
5781{
5782	const struct cpumask *span = sched_domain_span(sd);
5783	struct sd_data *sdd = sd->private;
5784	struct sched_domain *sibling;
5785	int i;
5786
5787	for_each_cpu(i, span) {
5788		sibling = *per_cpu_ptr(sdd->sd, i);
5789		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
5790			continue;
5791
5792		cpumask_set_cpu(i, sched_group_mask(sg));
5793	}
5794}
5795
5796/*
5797 * Return the canonical balance cpu for this group; this is the first cpu
5798 * of this group that's also in the iteration mask.
5799 */
5800int group_balance_cpu(struct sched_group *sg)
5801{
5802	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
5803}
5804
5805static int
5806build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5807{
5808	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
5809	const struct cpumask *span = sched_domain_span(sd);
5810	struct cpumask *covered = sched_domains_tmpmask;
5811	struct sd_data *sdd = sd->private;
5812	struct sched_domain *sibling;
5813	int i;
5814
5815	cpumask_clear(covered);
5816
5817	for_each_cpu(i, span) {
5818		struct cpumask *sg_span;
5819
5820		if (cpumask_test_cpu(i, covered))
5821			continue;
5822
5823		sibling = *per_cpu_ptr(sdd->sd, i);
5824
5825		/* See the comment near build_group_mask(). */
5826		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
5827			continue;
5828
5829		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
5830				GFP_KERNEL, cpu_to_node(cpu));
5831
5832		if (!sg)
5833			goto fail;
5834
5835		sg_span = sched_group_cpus(sg);
5836		if (sibling->child)
5837			cpumask_copy(sg_span, sched_domain_span(sibling->child));
5838		else
5839			cpumask_set_cpu(i, sg_span);
5840
5841		cpumask_or(covered, covered, sg_span);
5842
5843		sg->sgc = *per_cpu_ptr(sdd->sgc, i);
5844		if (atomic_inc_return(&sg->sgc->ref) == 1)
5845			build_group_mask(sd, sg);
5846
5847		/*
5848		 * Initialize sgc->capacity such that even if we mess up the
5849		 * domains and no possible iteration will get us here, we won't
5850		 * die on a /0 trap.
5851		 */
5852		sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
5853		sg->sgc->capacity_orig = sg->sgc->capacity;
5854
5855		/*
5856		 * Make sure the first group of this domain contains the
5857		 * canonical balance cpu. Otherwise the sched_domain iteration
5858		 * breaks. See update_sg_lb_stats().
5859		 */
5860		if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
5861		    group_balance_cpu(sg) == cpu)
5862			groups = sg;
5863
5864		if (!first)
5865			first = sg;
5866		if (last)
5867			last->next = sg;
5868		last = sg;
5869		last->next = first;
5870	}
5871	sd->groups = groups;
5872
5873	return 0;
5874
5875fail:
5876	free_sched_groups(first, 0);
5877
5878	return -ENOMEM;
5879}
5880
5881static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
5882{
5883	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
5884	struct sched_domain *child = sd->child;
5885
5886	if (child)
5887		cpu = cpumask_first(sched_domain_span(child));
5888
5889	if (sg) {
5890		*sg = *per_cpu_ptr(sdd->sg, cpu);
5891		(*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
5892		atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
5893	}
5894
5895	return cpu;
5896}
5897
5898/*
5899 * build_sched_groups will build a circular linked list of the groups
5900 * covered by the given span, set each group's ->cpumask correctly,
5901 * and set each group's ->cpu_capacity to 0.
5902 *
5903 * Assumes the sched_domain tree is fully constructed
5904 */
5905static int
5906build_sched_groups(struct sched_domain *sd, int cpu)
5907{
5908	struct sched_group *first = NULL, *last = NULL;
5909	struct sd_data *sdd = sd->private;
5910	const struct cpumask *span = sched_domain_span(sd);
5911	struct cpumask *covered;
5912	int i;
5913
5914	get_group(cpu, sdd, &sd->groups);
5915	atomic_inc(&sd->groups->ref);
5916
5917	if (cpu != cpumask_first(span))
5918		return 0;
5919
5920	lockdep_assert_held(&sched_domains_mutex);
5921	covered = sched_domains_tmpmask;
5922
5923	cpumask_clear(covered);
5924
5925	for_each_cpu(i, span) {
5926		struct sched_group *sg;
5927		int group, j;
5928
5929		if (cpumask_test_cpu(i, covered))
5930			continue;
5931
5932		group = get_group(i, sdd, &sg);
5933		cpumask_setall(sched_group_mask(sg));
5934
5935		for_each_cpu(j, span) {
5936			if (get_group(j, sdd, NULL) != group)
5937				continue;
5938
5939			cpumask_set_cpu(j, covered);
5940			cpumask_set_cpu(j, sched_group_cpus(sg));
5941		}
5942
5943		if (!first)
5944			first = sg;
5945		if (last)
5946			last->next = sg;
5947		last = sg;
5948	}
5949	last->next = first;
5950
5951	return 0;
5952}
5953
5954/*
5955 * Initialize sched groups cpu_capacity.
5956 *
5957 * cpu_capacity indicates the capacity of a sched group, which is used while
5958 * distributing the load between different sched groups in a sched domain.
5959 * Typically, cpu_capacity for all the groups in a sched domain will be the
5960 * same unless there are asymmetries in the topology. If there are
5961 * asymmetries, a group with more cpu_capacity will pick up more load
5962 * compared to a group with less cpu_capacity.
5963 */
5964static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
5965{
5966	struct sched_group *sg = sd->groups;
5967
5968	WARN_ON(!sg);
5969
5970	do {
5971		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
5972		sg = sg->next;
5973	} while (sg != sd->groups);
5974
5975	if (cpu != group_balance_cpu(sg))
5976		return;
5977
5978	update_group_capacity(sd, cpu);
5979	atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
5980}
5981
5982/*
5983 * Initializers for schedule domains
5984 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
5985 */
5986
5987static int default_relax_domain_level = -1;
5988int sched_domain_level_max;
5989
5990static int __init setup_relax_domain_level(char *str)
5991{
5992	if (kstrtoint(str, 0, &default_relax_domain_level))
5993		pr_warn("Unable to set relax_domain_level\n");
5994
5995	return 1;
5996}
5997__setup("relax_domain_level=", setup_relax_domain_level);
5998
5999static void set_domain_attribute(struct sched_domain *sd,
6000				 struct sched_domain_attr *attr)
6001{
6002	int request;
6003
6004	if (!attr || attr->relax_domain_level < 0) {
6005		if (default_relax_domain_level < 0)
6006			return;
6007		else
6008			request = default_relax_domain_level;
6009	} else
6010		request = attr->relax_domain_level;
6011	if (request < sd->level) {
6012		/* turn off idle balance on this domain */
6013		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
6014	} else {
6015		/* turn on idle balance on this domain */
6016		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
6017	}
6018}
6019
6020static void __sdt_free(const struct cpumask *cpu_map);
6021static int __sdt_alloc(const struct cpumask *cpu_map);
6022
6023static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
6024				 const struct cpumask *cpu_map)
6025{
6026	switch (what) {
6027	case sa_rootdomain:
6028		if (!atomic_read(&d->rd->refcount))
6029			free_rootdomain(&d->rd->rcu); /* fall through */
6030	case sa_sd:
6031		free_percpu(d->sd); /* fall through */
6032	case sa_sd_storage:
6033		__sdt_free(cpu_map); /* fall through */
6034	case sa_none:
6035		break;
6036	}
6037}
6038
6039static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
6040						   const struct cpumask *cpu_map)
6041{
6042	memset(d, 0, sizeof(*d));
6043
6044	if (__sdt_alloc(cpu_map))
6045		return sa_sd_storage;
6046	d->sd = alloc_percpu(struct sched_domain *);
6047	if (!d->sd)
6048		return sa_sd_storage;
6049	d->rd = alloc_rootdomain();
6050	if (!d->rd)
6051		return sa_sd;
6052	return sa_rootdomain;
6053}
6054
6055/*
6056 * NULL the sd_data elements we've used to build the sched_domain and
6057 * sched_group structure so that the subsequent __free_domain_allocs()
6058 * will not free the data we're using.
6059 */
6060static void claim_allocations(int cpu, struct sched_domain *sd)
6061{
6062	struct sd_data *sdd = sd->private;
6063
6064	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
6065	*per_cpu_ptr(sdd->sd, cpu) = NULL;
6066
6067	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
6068		*per_cpu_ptr(sdd->sg, cpu) = NULL;
6069
6070	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
6071		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
6072}
6073
6074#ifdef CONFIG_NUMA
6075static int sched_domains_numa_levels;
6076static int *sched_domains_numa_distance;
6077static struct cpumask ***sched_domains_numa_masks;
6078static int sched_domains_curr_level;
6079#endif
6080
6081/*
6082 * SD_flags allowed in topology descriptions.
6083 *
6084 * SD_SHARE_CPUCAPACITY   - describes SMT topologies
6085 * SD_SHARE_PKG_RESOURCES - describes shared caches
6086 * SD_NUMA                - describes NUMA topologies
6087 * SD_SHARE_POWERDOMAIN   - describes shared power domain
6088 *
6089 * Odd one out:
6090 * SD_ASYM_PACKING        - describes SMT quirks
6091 */
6092#define TOPOLOGY_SD_FLAGS		\
6093	(SD_SHARE_CPUCAPACITY |		\
6094	 SD_SHARE_PKG_RESOURCES |	\
6095	 SD_NUMA |			\
6096	 SD_ASYM_PACKING |		\
6097	 SD_SHARE_POWERDOMAIN)
6098
6099static struct sched_domain *
6100sd_init(struct sched_domain_topology_level *tl, int cpu)
6101{
6102	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
6103	int sd_weight, sd_flags = 0;
6104
6105#ifdef CONFIG_NUMA
6106	/*
6107	 * Ugly hack to pass state to sd_numa_mask()...
6108	 */
6109	sched_domains_curr_level = tl->numa_level;
6110#endif
6111
6112	sd_weight = cpumask_weight(tl->mask(cpu));
6113
6114	if (tl->sd_flags)
6115		sd_flags = (*tl->sd_flags)();
6116	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
6117			"wrong sd_flags in topology description\n"))
6118		sd_flags &= ~TOPOLOGY_SD_FLAGS;
6119
6120	*sd = (struct sched_domain){
6121		.min_interval		= sd_weight,
6122		.max_interval		= 2*sd_weight,
6123		.busy_factor		= 32,
6124		.imbalance_pct		= 125,
6125
6126		.cache_nice_tries	= 0,
6127		.busy_idx		= 0,
6128		.idle_idx		= 0,
6129		.newidle_idx		= 0,
6130		.wake_idx		= 0,
6131		.forkexec_idx		= 0,
6132
6133		.flags			= 1*SD_LOAD_BALANCE
6134					| 1*SD_BALANCE_NEWIDLE
6135					| 1*SD_BALANCE_EXEC
6136					| 1*SD_BALANCE_FORK
6137					| 0*SD_BALANCE_WAKE
6138					| 1*SD_WAKE_AFFINE
6139					| 0*SD_SHARE_CPUCAPACITY
6140					| 0*SD_SHARE_PKG_RESOURCES
6141					| 0*SD_SERIALIZE
6142					| 0*SD_PREFER_SIBLING
6143					| 0*SD_NUMA
6144					| sd_flags
6145					,
6146
6147		.last_balance		= jiffies,
6148		.balance_interval	= sd_weight,
6149		.smt_gain		= 0,
6150		.max_newidle_lb_cost	= 0,
6151		.next_decay_max_lb_cost	= jiffies,
6152#ifdef CONFIG_SCHED_DEBUG
6153		.name			= tl->name,
6154#endif
6155	};
6156
6157	/*
6158	 * Convert topological properties into behaviour.
6159	 */
6160
6161	if (sd->flags & SD_SHARE_CPUCAPACITY) {
6162		sd->imbalance_pct = 110;
6163		sd->smt_gain = 1178; /* ~15% */
6164
6165	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
6166		sd->imbalance_pct = 117;
6167		sd->cache_nice_tries = 1;
6168		sd->busy_idx = 2;
6169
6170#ifdef CONFIG_NUMA
6171	} else if (sd->flags & SD_NUMA) {
6172		sd->cache_nice_tries = 2;
6173		sd->busy_idx = 3;
6174		sd->idle_idx = 2;
6175
6176		sd->flags |= SD_SERIALIZE;
6177		if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
6178			sd->flags &= ~(SD_BALANCE_EXEC |
6179				       SD_BALANCE_FORK |
6180				       SD_WAKE_AFFINE);
6181		}
6182
6183#endif
6184	} else {
6185		sd->flags |= SD_PREFER_SIBLING;
6186		sd->cache_nice_tries = 1;
6187		sd->busy_idx = 2;
6188		sd->idle_idx = 1;
6189	}
6190
6191	sd->private = &tl->data;
6192
6193	return sd;
6194}
6195
6196/*
6197 * Topology list, bottom-up.
6198 */
6199static struct sched_domain_topology_level default_topology[] = {
6200#ifdef CONFIG_SCHED_SMT
6201	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
6202#endif
6203#ifdef CONFIG_SCHED_MC
6204	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
6205#endif
6206	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
6207	{ NULL, },
6208};
6209
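/*
 * On a hypothetical dual-socket machine with SMT-enabled multi-core
 * packages, this table yields (bottom-up) an SMT domain per core, an MC
 * domain per package and a DIE domain per socket; sched_init_numa()
 * below appends further NUMA levels on top when applicable.
 */
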
6210struct sched_domain_topology_level *sched_domain_topology = default_topology;
6211
6212#define for_each_sd_topology(tl)			\
6213	for (tl = sched_domain_topology; tl->mask; tl++)
6214
6215void set_sched_topology(struct sched_domain_topology_level *tl)
6216{
6217	sched_domain_topology = tl;
6218}
6219
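/*
 * A minimal sketch of an architecture override (the table name here is
 * hypothetical, not taken from any real arch):
 *
 *	static struct sched_domain_topology_level my_arch_topology[] = {
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_arch_topology);
 */
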
6220#ifdef CONFIG_NUMA
6221
6222static const struct cpumask *sd_numa_mask(int cpu)
6223{
6224	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
6225}
6226
6227static void sched_numa_warn(const char *str)
6228{
6229	static bool done = false;
6230	int i, j;
6231
6232	if (done)
6233		return;
6234
6235	done = true;
6236
6237	printk(KERN_WARNING "ERROR: %s\n\n", str);
6238
6239	for (i = 0; i < nr_node_ids; i++) {
6240		printk(KERN_WARNING "  ");
6241		for (j = 0; j < nr_node_ids; j++)
6242			printk(KERN_CONT "%02d ", node_distance(i,j));
6243		printk(KERN_CONT "\n");
6244	}
6245	printk(KERN_WARNING "\n");
6246}
6247
6248static bool find_numa_distance(int distance)
6249{
6250	int i;
6251
6252	if (distance == node_distance(0, 0))
6253		return true;
6254
6255	for (i = 0; i < sched_domains_numa_levels; i++) {
6256		if (sched_domains_numa_distance[i] == distance)
6257			return true;
6258	}
6259
6260	return false;
6261}
6262
6263static void sched_init_numa(void)
6264{
6265	int next_distance, curr_distance = node_distance(0, 0);
6266	struct sched_domain_topology_level *tl;
6267	int level = 0;
6268	int i, j, k;
6269
6270	sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
6271	if (!sched_domains_numa_distance)
6272		return;
6273
6274	/*
6275	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
6276	 * unique distances in the node_distance() table.
6277	 *
6278	 * Assumes node_distance(0,j) includes all distances in
6279	 * node_distance(i,j) in order to avoid cubic time.
6280	 */
6281	next_distance = curr_distance;
6282	for (i = 0; i < nr_node_ids; i++) {
6283		for (j = 0; j < nr_node_ids; j++) {
6284			for (k = 0; k < nr_node_ids; k++) {
6285				int distance = node_distance(i, k);
6286
6287				if (distance > curr_distance &&
6288				    (distance < next_distance ||
6289				     next_distance == curr_distance))
6290					next_distance = distance;
6291
6292				/*
6293				 * While not a strong assumption, it would be nice to know
6294				 * about cases where node A is connected to B but B is not
6295				 * equally connected back to A.
6296				 */
6297				if (sched_debug() && node_distance(k, i) != distance)
6298					sched_numa_warn("Node-distance not symmetric");
6299
6300				if (sched_debug() && i && !find_numa_distance(distance))
6301					sched_numa_warn("Node-0 not representative");
6302			}
6303			if (next_distance != curr_distance) {
6304				sched_domains_numa_distance[level++] = next_distance;
6305				sched_domains_numa_levels = level;
6306				curr_distance = next_distance;
6307			} else break;
6308		}
6309
6310		/*
6311		 * When sched_debug() is enabled, we verify the above assumption.
6312		 */
6313		if (!sched_debug())
6314			break;
6315	}
6316	/*
6317	 * 'level' contains the number of unique distances, excluding the
6318	 * identity distance node_distance(i,i).
6319	 *
6320	 * The sched_domains_numa_distance[] array includes the actual distance
6321	 * numbers.
6322	 */
6323
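/*
 * Worked example: a four-node machine whose node_distance() table holds
 * 10 (local), 20 (one hop) and 30 (two hops) ends up with level == 2
 * and sched_domains_numa_distance[] == { 20, 30 }; the per-node masks
 * built below then cover all nodes within distance 20 at level 0 and
 * the whole machine at level 1.
 */
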
6324	/*
6325	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
6326	 * Temporarily reset sched_domains_numa_levels to 0 here.
6327	 * If allocating the sched_domains_numa_masks[][] array below fails,
6328	 * the array will contain fewer than 'level' members. That would be
6329	 * dangerous for other functions that use the value to iterate over
6330	 * sched_domains_numa_masks[][].
6331	 * We reset it to 'level' at the end of this function.
6332	 */
6333	sched_domains_numa_levels = 0;
6334
6335	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
6336	if (!sched_domains_numa_masks)
6337		return;
6338
6339	/*
6340	 * Now for each level, construct a mask per node which contains all
6341	 * cpus of nodes that are that many hops away from us.
6342	 */
6343	for (i = 0; i < level; i++) {
6344		sched_domains_numa_masks[i] =
6345			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
6346		if (!sched_domains_numa_masks[i])
6347			return;
6348
6349		for (j = 0; j < nr_node_ids; j++) {
6350			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
6351			if (!mask)
6352				return;
6353
6354			sched_domains_numa_masks[i][j] = mask;
6355
6356			for (k = 0; k < nr_node_ids; k++) {
6357				if (node_distance(j, k) > sched_domains_numa_distance[i])
6358					continue;
6359
6360				cpumask_or(mask, mask, cpumask_of_node(k));
6361			}
6362		}
6363	}
6364
6365	/* Compute default topology size */
6366	for (i = 0; sched_domain_topology[i].mask; i++);
6367
6368	tl = kzalloc((i + level + 1) *
6369			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
6370	if (!tl)
6371		return;
6372
6373	/*
6374	 * Copy the default topology bits..
6375	 */
6376	for (i = 0; sched_domain_topology[i].mask; i++)
6377		tl[i] = sched_domain_topology[i];
6378
6379	/*
6380	 * .. and append 'j' levels of NUMA goodness.
6381	 */
6382	for (j = 0; j < level; i++, j++) {
6383		tl[i] = (struct sched_domain_topology_level){
6384			.mask = sd_numa_mask,
6385			.sd_flags = cpu_numa_flags,
6386			.flags = SDTL_OVERLAP,
6387			.numa_level = j,
6388			SD_INIT_NAME(NUMA)
6389		};
6390	}
6391
6392	sched_domain_topology = tl;
6393
6394	sched_domains_numa_levels = level;
6395}
6396
6397static void sched_domains_numa_masks_set(int cpu)
6398{
6399	int i, j;
6400	int node = cpu_to_node(cpu);
6401
6402	for (i = 0; i < sched_domains_numa_levels; i++) {
6403		for (j = 0; j < nr_node_ids; j++) {
6404			if (node_distance(j, node) <= sched_domains_numa_distance[i])
6405				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
6406		}
6407	}
6408}
6409
6410static void sched_domains_numa_masks_clear(int cpu)
6411{
6412	int i, j;
6413	for (i = 0; i < sched_domains_numa_levels; i++) {
6414		for (j = 0; j < nr_node_ids; j++)
6415			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
6416	}
6417}
6418
6419/*
6420 * Update sched_domains_numa_masks[level][node] array when new cpus
6421 * are onlined.
6422 */
6423static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6424					   unsigned long action,
6425					   void *hcpu)
6426{
6427	int cpu = (long)hcpu;
6428
6429	switch (action & ~CPU_TASKS_FROZEN) {
6430	case CPU_ONLINE:
6431		sched_domains_numa_masks_set(cpu);
6432		break;
6433
6434	case CPU_DEAD:
6435		sched_domains_numa_masks_clear(cpu);
6436		break;
6437
6438	default:
6439		return NOTIFY_DONE;
6440	}
6441
6442	return NOTIFY_OK;
6443}
6444#else
6445static inline void sched_init_numa(void)
6446{
6447}
6448
6449static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6450					   unsigned long action,
6451					   void *hcpu)
6452{
6453	return 0;
6454}
6455#endif /* CONFIG_NUMA */
6456
6457static int __sdt_alloc(const struct cpumask *cpu_map)
6458{
6459	struct sched_domain_topology_level *tl;
6460	int j;
6461
6462	for_each_sd_topology(tl) {
6463		struct sd_data *sdd = &tl->data;
6464
6465		sdd->sd = alloc_percpu(struct sched_domain *);
6466		if (!sdd->sd)
6467			return -ENOMEM;
6468
6469		sdd->sg = alloc_percpu(struct sched_group *);
6470		if (!sdd->sg)
6471			return -ENOMEM;
6472
6473		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
6474		if (!sdd->sgc)
6475			return -ENOMEM;
6476
6477		for_each_cpu(j, cpu_map) {
6478			struct sched_domain *sd;
6479			struct sched_group *sg;
6480			struct sched_group_capacity *sgc;
6481
6482			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
6483					GFP_KERNEL, cpu_to_node(j));
6484			if (!sd)
6485				return -ENOMEM;
6486
6487			*per_cpu_ptr(sdd->sd, j) = sd;
6488
6489			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6490					GFP_KERNEL, cpu_to_node(j));
6491			if (!sg)
6492				return -ENOMEM;
6493
6494			sg->next = sg;
6495
6496			*per_cpu_ptr(sdd->sg, j) = sg;
6497
6498			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
6499					GFP_KERNEL, cpu_to_node(j));
6500			if (!sgc)
6501				return -ENOMEM;
6502
6503			*per_cpu_ptr(sdd->sgc, j) = sgc;
6504		}
6505	}
6506
6507	return 0;
6508}
6509
6510static void __sdt_free(const struct cpumask *cpu_map)
6511{
6512	struct sched_domain_topology_level *tl;
6513	int j;
6514
6515	for_each_sd_topology(tl) {
6516		struct sd_data *sdd = &tl->data;
6517
6518		for_each_cpu(j, cpu_map) {
6519			struct sched_domain *sd;
6520
6521			if (sdd->sd) {
6522				sd = *per_cpu_ptr(sdd->sd, j);
6523				if (sd && (sd->flags & SD_OVERLAP))
6524					free_sched_groups(sd->groups, 0);
6525				kfree(*per_cpu_ptr(sdd->sd, j));
6526			}
6527
6528			if (sdd->sg)
6529				kfree(*per_cpu_ptr(sdd->sg, j));
6530			if (sdd->sgc)
6531				kfree(*per_cpu_ptr(sdd->sgc, j));
6532		}
6533		free_percpu(sdd->sd);
6534		sdd->sd = NULL;
6535		free_percpu(sdd->sg);
6536		sdd->sg = NULL;
6537		free_percpu(sdd->sgc);
6538		sdd->sgc = NULL;
6539	}
6540}
6541
6542struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
6543		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
6544		struct sched_domain *child, int cpu)
6545{
6546	struct sched_domain *sd = sd_init(tl, cpu);
6547	if (!sd)
6548		return child;
6549
6550	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
6551	if (child) {
6552		sd->level = child->level + 1;
6553		sched_domain_level_max = max(sched_domain_level_max, sd->level);
6554		child->parent = sd;
6555		sd->child = child;
6556
6557		if (!cpumask_subset(sched_domain_span(child),
6558				    sched_domain_span(sd))) {
6559			pr_err("BUG: arch topology broken\n");
6560#ifdef CONFIG_SCHED_DEBUG
6561			pr_err("     the %s domain is not a subset of the %s domain\n",
6562					child->name, sd->name);
6563#endif
6564			/* Fixup, ensure @sd has at least @child cpus. */
6565			cpumask_or(sched_domain_span(sd),
6566				   sched_domain_span(sd),
6567				   sched_domain_span(child));
6568		}
6569
6570	}
6571	set_domain_attribute(sd, attr);
6572
6573	return sd;
6574}
6575
6576/*
6577 * Build sched domains for a given set of cpus and attach the sched domains
6578 * to the individual cpus
6579 */
6580static int build_sched_domains(const struct cpumask *cpu_map,
6581			       struct sched_domain_attr *attr)
6582{
6583	enum s_alloc alloc_state;
6584	struct sched_domain *sd;
6585	struct s_data d;
6586	int i, ret = -ENOMEM;
6587
6588	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
6589	if (alloc_state != sa_rootdomain)
6590		goto error;
6591
6592	/* Set up domains for cpus specified by the cpu_map. */
6593	for_each_cpu(i, cpu_map) {
6594		struct sched_domain_topology_level *tl;
6595
6596		sd = NULL;
6597		for_each_sd_topology(tl) {
6598			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
6599			if (tl == sched_domain_topology)
6600				*per_cpu_ptr(d.sd, i) = sd;
6601			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
6602				sd->flags |= SD_OVERLAP;
6603			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
6604				break;
6605		}
6606	}
6607
6608	/* Build the groups for the domains */
6609	for_each_cpu(i, cpu_map) {
6610		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6611			sd->span_weight = cpumask_weight(sched_domain_span(sd));
6612			if (sd->flags & SD_OVERLAP) {
6613				if (build_overlap_sched_groups(sd, i))
6614					goto error;
6615			} else {
6616				if (build_sched_groups(sd, i))
6617					goto error;
6618			}
6619		}
6620	}
6621
6622	/* Calculate CPU capacity for physical packages and nodes */
6623	for (i = nr_cpumask_bits-1; i >= 0; i--) {
6624		if (!cpumask_test_cpu(i, cpu_map))
6625			continue;
6626
6627		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6628			claim_allocations(i, sd);
6629			init_sched_groups_capacity(i, sd);
6630		}
6631	}
6632
6633	/* Attach the domains */
6634	rcu_read_lock();
6635	for_each_cpu(i, cpu_map) {
6636		sd = *per_cpu_ptr(d.sd, i);
6637		cpu_attach_domain(sd, d.rd, i);
6638	}
6639	rcu_read_unlock();
6640
6641	ret = 0;
6642error:
6643	__free_domain_allocs(&d, alloc_state, cpu_map);
6644	return ret;
6645}
6646
6647static cpumask_var_t *doms_cur;	/* current sched domains */
6648static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
6649static struct sched_domain_attr *dattr_cur;
6650				/* attributes of custom domains in 'doms_cur' */
6651
6652/*
6653 * Special case: If a kmalloc of a doms_cur partition (array of
6654 * cpumask) fails, then fallback to a single sched domain,
6655 * as determined by the single cpumask fallback_doms.
6656 */
6657static cpumask_var_t fallback_doms;
6658
6659/*
6660 * arch_update_cpu_topology lets virtualized architectures update the
6661 * cpu core maps. It is supposed to return 1 if the topology changed
6662 * or 0 if it stayed the same.
6663 */
6664int __weak arch_update_cpu_topology(void)
6665{
6666	return 0;
6667}
6668
6669cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
6670{
6671	int i;
6672	cpumask_var_t *doms;
6673
6674	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
6675	if (!doms)
6676		return NULL;
6677	for (i = 0; i < ndoms; i++) {
6678		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
6679			free_sched_domains(doms, i);
6680			return NULL;
6681		}
6682	}
6683	return doms;
6684}
6685
6686void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
6687{
6688	unsigned int i;
6689	for (i = 0; i < ndoms; i++)
6690		free_cpumask_var(doms[i]);
6691	kfree(doms);
6692}
6693
6694/*
6695 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
6696 * For now this just excludes isolated cpus, but could be used to
6697 * exclude other special cases in the future.
6698 */
6699static int init_sched_domains(const struct cpumask *cpu_map)
6700{
6701	int err;
6702
6703	arch_update_cpu_topology();
6704	ndoms_cur = 1;
6705	doms_cur = alloc_sched_domains(ndoms_cur);
6706	if (!doms_cur)
6707		doms_cur = &fallback_doms;
6708	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
6709	err = build_sched_domains(doms_cur[0], NULL);
6710	register_sched_domain_sysctl();
6711
6712	return err;
6713}
6714
6715/*
6716 * Detach sched domains from a group of cpus specified in cpu_map
6717 * These cpus will now be attached to the NULL domain
6718 */
6719static void detach_destroy_domains(const struct cpumask *cpu_map)
6720{
6721	int i;
6722
6723	rcu_read_lock();
6724	for_each_cpu(i, cpu_map)
6725		cpu_attach_domain(NULL, &def_root_domain, i);
6726	rcu_read_unlock();
6727}
6728
6729/* handle null as "default" */
6730static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
6731			struct sched_domain_attr *new, int idx_new)
6732{
6733	struct sched_domain_attr tmp;
6734
6735	/* fast path */
6736	if (!new && !cur)
6737		return 1;
6738
6739	tmp = SD_ATTR_INIT;
6740	return !memcmp(cur ? (cur + idx_cur) : &tmp,
6741			new ? (new + idx_new) : &tmp,
6742			sizeof(struct sched_domain_attr));
6743}
6744
6745/*
6746 * Partition sched domains as specified by the 'ndoms_new'
6747 * cpumasks in the array doms_new[]. This compares
6748 * doms_new[] to the current sched domain partitioning, doms_cur[].
6749 * It destroys each deleted domain and builds each new domain.
6750 *
6751 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
6752 * The masks don't intersect (don't overlap); we set up one
6753 * sched domain for each mask. CPUs not in any of the cpumasks will
6754 * not be load balanced. If the same cpumask appears both in the
6755 * current 'doms_cur' domains and in the new 'doms_new', we can leave
6756 * it as it is.
6757 *
6758 * The passed-in 'doms_new' should be allocated using
6759 * alloc_sched_domains.  This routine takes ownership of it and will
6760 * free_sched_domains it when done with it. If the caller failed the
6761 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
6762 * and partition_sched_domains() will fall back to the single partition
6763 * 'fallback_doms'; this also forces the domains to be rebuilt.
6764 *
6765 * If doms_new == NULL it will be replaced with cpu_online_mask.
6766 * ndoms_new == 0 is a special case for destroying existing domains,
6767 * and it will not create the default domain.
6768 *
6769 * Call with hotplug lock held
6770 */
6771void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
6772			     struct sched_domain_attr *dattr_new)
6773{
6774	int i, j, n;
6775	int new_topology;
6776
6777	mutex_lock(&sched_domains_mutex);
6778
6779	/* always unregister in case we don't destroy any domains */
6780	unregister_sched_domain_sysctl();
6781
6782	/* Let architecture update cpu core mappings. */
6783	new_topology = arch_update_cpu_topology();
6784
6785	n = doms_new ? ndoms_new : 0;
6786
6787	/* Destroy deleted domains */
6788	for (i = 0; i < ndoms_cur; i++) {
6789		for (j = 0; j < n && !new_topology; j++) {
6790			if (cpumask_equal(doms_cur[i], doms_new[j])
6791			    && dattrs_equal(dattr_cur, i, dattr_new, j))
6792				goto match1;
6793		}
6794		/* no match - a current sched domain not in new doms_new[] */
6795		detach_destroy_domains(doms_cur[i]);
6796match1:
6797		;
6798	}
6799
6800	n = ndoms_cur;
6801	if (doms_new == NULL) {
6802		n = 0;
6803		doms_new = &fallback_doms;
6804		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
6805		WARN_ON_ONCE(dattr_new);
6806	}
6807
6808	/* Build new domains */
6809	for (i = 0; i < ndoms_new; i++) {
6810		for (j = 0; j < n && !new_topology; j++) {
6811			if (cpumask_equal(doms_new[i], doms_cur[j])
6812			    && dattrs_equal(dattr_new, i, dattr_cur, j))
6813				goto match2;
6814		}
6815		/* no match - add a new doms_new */
6816		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
6817match2:
6818		;
6819	}
6820
6821	/* Remember the new sched domains */
6822	if (doms_cur != &fallback_doms)
6823		free_sched_domains(doms_cur, ndoms_cur);
6824	kfree(dattr_cur);	/* kfree(NULL) is safe */
6825	doms_cur = doms_new;
6826	dattr_cur = dattr_new;
6827	ndoms_cur = ndoms_new;
6828
6829	register_sched_domain_sysctl();
6830
6831	mutex_unlock(&sched_domains_mutex);
6832}
6833
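/*
 * A minimal sketch of a caller (the masks are hypothetical; the real
 * caller is the cpuset code, with the hotplug lock held):
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], mask_a);
 *		cpumask_copy(doms[1], mask_b);
 *		partition_sched_domains(2, doms, NULL);
 *	}
 *
 * partition_sched_domains() takes ownership of 'doms' and tears down
 * whatever no longer matches doms_cur[].
 */
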
6834static int num_cpus_frozen;	/* used to mark begin/end of suspend/resume */
6835
6836/*
6837 * Update cpusets according to cpu_active mask.  If cpusets are
6838 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
6839 * around partition_sched_domains().
6840 *
6841 * If we come here as part of a suspend/resume, don't touch cpusets because we
6842 * want to restore them to their original state upon resume anyway.
6843 */
6844static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
6845			     void *hcpu)
6846{
6847	switch (action) {
6848	case CPU_ONLINE_FROZEN:
6849	case CPU_DOWN_FAILED_FROZEN:
6850
6851		/*
6852		 * num_cpus_frozen tracks how many CPUs are involved in the
6853		 * suspend/resume sequence. As long as this is not the last online
6854		 * operation in the resume sequence, just build a single sched
6855		 * domain, ignoring cpusets.
6856		 */
6857		num_cpus_frozen--;
6858		if (likely(num_cpus_frozen)) {
6859			partition_sched_domains(1, NULL, NULL);
6860			break;
6861		}
6862
6863		/*
6864		 * This is the last CPU online operation. So fall through and
6865		 * restore the original sched domains by considering the
6866		 * cpuset configurations.
6867		 */
6868
6869	case CPU_ONLINE:
6870	case CPU_DOWN_FAILED:
6871		cpuset_update_active_cpus(true);
6872		break;
6873	default:
6874		return NOTIFY_DONE;
6875	}
6876	return NOTIFY_OK;
6877}
6878
6879static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
6880			       void *hcpu)
6881{
6882	switch (action) {
6883	case CPU_DOWN_PREPARE:
6884		cpuset_update_active_cpus(false);
6885		break;
6886	case CPU_DOWN_PREPARE_FROZEN:
6887		num_cpus_frozen++;
6888		partition_sched_domains(1, NULL, NULL);
6889		break;
6890	default:
6891		return NOTIFY_DONE;
6892	}
6893	return NOTIFY_OK;
6894}
6895
6896void __init sched_init_smp(void)
6897{
6898	cpumask_var_t non_isolated_cpus;
6899
6900	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
6901	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
6902
6903	sched_init_numa();
6904
6905	/*
6906	 * There's no userspace yet to cause hotplug operations; hence all the
6907	 * cpu masks are stable and all blatant races in the below code cannot
6908	 * happen.
6909	 */
6910	mutex_lock(&sched_domains_mutex);
6911	init_sched_domains(cpu_active_mask);
6912	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
6913	if (cpumask_empty(non_isolated_cpus))
6914		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
6915	mutex_unlock(&sched_domains_mutex);
6916
6917	hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
6918	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
6919	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
6920
6921	init_hrtick();
6922
6923	/* Move init over to a non-isolated CPU */
6924	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
6925		BUG();
6926	sched_init_granularity();
6927	free_cpumask_var(non_isolated_cpus);
6928
6929	init_sched_rt_class();
6930	init_sched_dl_class();
6931}
6932#else
6933void __init sched_init_smp(void)
6934{
6935	sched_init_granularity();
6936}
6937#endif /* CONFIG_SMP */
6938
6939const_debug unsigned int sysctl_timer_migration = 1;
6940
6941int in_sched_functions(unsigned long addr)
6942{
6943	return in_lock_functions(addr) ||
6944		(addr >= (unsigned long)__sched_text_start
6945		&& addr < (unsigned long)__sched_text_end);
6946}
6947
6948#ifdef CONFIG_CGROUP_SCHED
6949/*
6950 * Default task group.
6951 * Every task in the system belongs to this group at bootup.
6952 */
6953struct task_group root_task_group;
6954LIST_HEAD(task_groups);
6955#endif
6956
6957DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
6958
6959void __init sched_init(void)
6960{
6961	int i, j;
6962	unsigned long alloc_size = 0, ptr;
6963
6964#ifdef CONFIG_FAIR_GROUP_SCHED
6965	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6966#endif
6967#ifdef CONFIG_RT_GROUP_SCHED
6968	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6969#endif
6970#ifdef CONFIG_CPUMASK_OFFSTACK
6971	alloc_size += num_possible_cpus() * cpumask_size();
6972#endif
6973	if (alloc_size) {
6974		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
6975
6976#ifdef CONFIG_FAIR_GROUP_SCHED
6977		root_task_group.se = (struct sched_entity **)ptr;
6978		ptr += nr_cpu_ids * sizeof(void **);
6979
6980		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
6981		ptr += nr_cpu_ids * sizeof(void **);
6982
6983#endif /* CONFIG_FAIR_GROUP_SCHED */
6984#ifdef CONFIG_RT_GROUP_SCHED
6985		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
6986		ptr += nr_cpu_ids * sizeof(void **);
6987
6988		root_task_group.rt_rq = (struct rt_rq **)ptr;
6989		ptr += nr_cpu_ids * sizeof(void **);
6990
6991#endif /* CONFIG_RT_GROUP_SCHED */
6992#ifdef CONFIG_CPUMASK_OFFSTACK
6993		for_each_possible_cpu(i) {
6994			per_cpu(load_balance_mask, i) = (void *)ptr;
6995			ptr += cpumask_size();
6996		}
6997#endif /* CONFIG_CPUMASK_OFFSTACK */
6998	}
6999
7000	init_rt_bandwidth(&def_rt_bandwidth,
7001			global_rt_period(), global_rt_runtime());
7002	init_dl_bandwidth(&def_dl_bandwidth,
7003			global_rt_period(), global_rt_runtime());
7004
7005#ifdef CONFIG_SMP
7006	init_defrootdomain();
7007#endif
7008
7009#ifdef CONFIG_RT_GROUP_SCHED
7010	init_rt_bandwidth(&root_task_group.rt_bandwidth,
7011			global_rt_period(), global_rt_runtime());
7012#endif /* CONFIG_RT_GROUP_SCHED */
7013
7014#ifdef CONFIG_CGROUP_SCHED
7015	list_add(&root_task_group.list, &task_groups);
7016	INIT_LIST_HEAD(&root_task_group.children);
7017	INIT_LIST_HEAD(&root_task_group.siblings);
7018	autogroup_init(&init_task);
7019
7020#endif /* CONFIG_CGROUP_SCHED */
7021
7022	for_each_possible_cpu(i) {
7023		struct rq *rq;
7024
7025		rq = cpu_rq(i);
7026		raw_spin_lock_init(&rq->lock);
7027		rq->nr_running = 0;
7028		rq->calc_load_active = 0;
7029		rq->calc_load_update = jiffies + LOAD_FREQ;
7030		init_cfs_rq(&rq->cfs);
7031		init_rt_rq(&rq->rt, rq);
7032		init_dl_rq(&rq->dl, rq);
7033#ifdef CONFIG_FAIR_GROUP_SCHED
7034		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
7035		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
7036		/*
7037		 * How much cpu bandwidth does root_task_group get?
7038		 *
7039		 * In the case of task-groups formed through the cgroup filesystem, it
7040		 * gets 100% of the cpu resources in the system. This overall
7041		 * system cpu resource is divided among the tasks of
7042		 * root_task_group and its child task-groups in a fair manner,
7043		 * based on each entity's (task or task-group's) weight
7044		 * (se->load.weight).
7045		 *
7046		 * In other words, if root_task_group has 10 tasks (each of weight
7047		 * 1024) and two child groups A0 and A1 (of weight 1024 each),
7048		 * then A0's share of the cpu resource is:
7049		 *
7050		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
7051		 *
7052		 * We achieve this by letting root_task_group's tasks sit
7053		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
7054		 */
7055		init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
7056		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
7057#endif /* CONFIG_FAIR_GROUP_SCHED */
7058
7059		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
7060#ifdef CONFIG_RT_GROUP_SCHED
7061		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
7062#endif
7063
7064		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7065			rq->cpu_load[j] = 0;
7066
7067		rq->last_load_update_tick = jiffies;
7068
7069#ifdef CONFIG_SMP
7070		rq->sd = NULL;
7071		rq->rd = NULL;
7072		rq->cpu_capacity = SCHED_CAPACITY_SCALE;
7073		rq->post_schedule = 0;
7074		rq->active_balance = 0;
7075		rq->next_balance = jiffies;
7076		rq->push_cpu = 0;
7077		rq->cpu = i;
7078		rq->online = 0;
7079		rq->idle_stamp = 0;
7080		rq->avg_idle = 2*sysctl_sched_migration_cost;
7081		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
7082
7083		INIT_LIST_HEAD(&rq->cfs_tasks);
7084
7085		rq_attach_root(rq, &def_root_domain);
7086#ifdef CONFIG_NO_HZ_COMMON
7087		rq->nohz_flags = 0;
7088#endif
7089#ifdef CONFIG_NO_HZ_FULL
7090		rq->last_sched_tick = 0;
7091#endif
7092#endif
7093		init_rq_hrtick(rq);
7094		atomic_set(&rq->nr_iowait, 0);
7095	}
7096
7097	set_load_weight(&init_task);
7098
7099#ifdef CONFIG_PREEMPT_NOTIFIERS
7100	INIT_HLIST_HEAD(&init_task.preempt_notifiers);
7101#endif
7102
7103	/*
7104	 * The boot idle thread does lazy MMU switching as well:
7105	 */
7106	atomic_inc(&init_mm.mm_count);
7107	enter_lazy_tlb(&init_mm, current);
7108
7109	/*
7110	 * Make us the idle thread. Technically, schedule() should not be
7111	 * called from this thread; however, somewhere below it might be
7112	 * anyway, and because we are the idle thread, we just pick up running
7113	 * again when this runqueue becomes "idle".
7114	 */
7115	init_idle(current, smp_processor_id());
7116
7117	calc_load_update = jiffies + LOAD_FREQ;
7118
7119	/*
7120	 * During early bootup we pretend to be a normal task:
7121	 */
7122	current->sched_class = &fair_sched_class;
7123
7124#ifdef CONFIG_SMP
7125	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
7126	/* May be allocated at isolcpus cmdline parse time */
7127	if (cpu_isolated_map == NULL)
7128		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
7129	idle_thread_set_boot_cpu();
7130	set_cpu_rq_start_time();
7131#endif
7132	init_sched_fair_class();
7133
7134	scheduler_running = 1;
7135}
7136
7137#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
7138static inline int preempt_count_equals(int preempt_offset)
7139{
7140	int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
7141
7142	return (nested == preempt_offset);
7143}
7144
7145void __might_sleep(const char *file, int line, int preempt_offset)
7146{
7147	static unsigned long prev_jiffy;	/* ratelimiting */
7148
7149	rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
7150	if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
7151	     !is_idle_task(current)) ||
7152	    system_state != SYSTEM_RUNNING || oops_in_progress)
7153		return;
7154	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7155		return;
7156	prev_jiffy = jiffies;
7157
7158	printk(KERN_ERR
7159		"BUG: sleeping function called from invalid context at %s:%d\n",
7160			file, line);
7161	printk(KERN_ERR
7162		"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
7163			in_atomic(), irqs_disabled(),
7164			current->pid, current->comm);
7165
7166	debug_show_held_locks(current);
7167	if (irqs_disabled())
7168		print_irqtrace_events(current);
7169#ifdef CONFIG_DEBUG_PREEMPT
7170	if (!preempt_count_equals(preempt_offset)) {
7171		pr_err("Preemption disabled at:");
7172		print_ip_sym(current->preempt_disable_ip);
7173		pr_cont("\n");
7174	}
7175#endif
7176	dump_stack();
7177}
7178EXPORT_SYMBOL(__might_sleep);
7179#endif
7180
7181#ifdef CONFIG_MAGIC_SYSRQ
7182static void normalize_task(struct rq *rq, struct task_struct *p)
7183{
7184	const struct sched_class *prev_class = p->sched_class;
7185	struct sched_attr attr = {
7186		.sched_policy = SCHED_NORMAL,
7187	};
7188	int old_prio = p->prio;
7189	int queued;
7190
7191	queued = task_on_rq_queued(p);
7192	if (queued)
7193		dequeue_task(rq, p, 0);
7194	__setscheduler(rq, p, &attr);
7195	if (queued) {
7196		enqueue_task(rq, p, 0);
7197		resched_curr(rq);
7198	}
7199
7200	check_class_changed(rq, p, prev_class, old_prio);
7201}
7202
7203void normalize_rt_tasks(void)
7204{
7205	struct task_struct *g, *p;
7206	unsigned long flags;
7207	struct rq *rq;
7208
7209	read_lock_irqsave(&tasklist_lock, flags);
7210	for_each_process_thread(g, p) {
7211		/*
7212		 * Only normalize user tasks:
7213		 */
7214		if (!p->mm)
7215			continue;
7216
7217		p->se.exec_start		= 0;
7218#ifdef CONFIG_SCHEDSTATS
7219		p->se.statistics.wait_start	= 0;
7220		p->se.statistics.sleep_start	= 0;
7221		p->se.statistics.block_start	= 0;
7222#endif
7223
7224		if (!dl_task(p) && !rt_task(p)) {
7225			/*
7226			 * Renice userspace tasks with a negative
7227			 * nice level back to 0:
7228			 */
7229			if (task_nice(p) < 0 && p->mm)
7230				set_user_nice(p, 0);
7231			continue;
7232		}
7233
7234		raw_spin_lock(&p->pi_lock);
7235		rq = __task_rq_lock(p);
7236
7237		normalize_task(rq, p);
7238
7239		__task_rq_unlock(rq);
7240		raw_spin_unlock(&p->pi_lock);
7241	}
7242	read_unlock_irqrestore(&tasklist_lock, flags);
7243}
7244
7245#endif /* CONFIG_MAGIC_SYSRQ */
7246
7247#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
7248/*
7249 * These functions are only useful for the IA64 MCA handling, or kdb.
7250 *
7251 * They can only be called when the whole system has been
7252 * stopped - every CPU needs to be quiescent, and no scheduling
7253 * activity can take place. Using them for anything else would
7254 * be a serious bug, and as a result, they aren't even visible
7255 * under any other configuration.
7256 */
7257
7258/**
7259 * curr_task - return the current task for a given cpu.
7260 * @cpu: the processor in question.
7261 *
7262 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7263 *
7264 * Return: The current task for @cpu.
7265 */
7266struct task_struct *curr_task(int cpu)
7267{
7268	return cpu_curr(cpu);
7269}
7270
7271#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
7272
7273#ifdef CONFIG_IA64
7274/**
7275 * set_curr_task - set the current task for a given cpu.
7276 * @cpu: the processor in question.
7277 * @p: the task pointer to set.
7278 *
7279 * Description: This function must only be used when non-maskable interrupts
7280 * are serviced on a separate stack. It allows the architecture to switch the
7281 * notion of the current task on a cpu in a non-blocking manner. This function
7282 * must be called with all CPUs synchronized and interrupts disabled; the
7283 * caller must save the original value of the current task (see
7284 * curr_task() above) and restore that value before re-enabling interrupts and
7285 * re-starting the system.
7286 *
7287 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7288 */
7289void set_curr_task(int cpu, struct task_struct *p)
7290{
7291	cpu_curr(cpu) = p;
7292}
7293
7294#endif
7295
7296#ifdef CONFIG_CGROUP_SCHED
7297/* task_group_lock serializes the addition/removal of task groups */
7298static DEFINE_SPINLOCK(task_group_lock);
7299
7300static void free_sched_group(struct task_group *tg)
7301{
7302	free_fair_sched_group(tg);
7303	free_rt_sched_group(tg);
7304	autogroup_free(tg);
7305	kfree(tg);
7306}
7307
7308/* allocate runqueue etc for a new task group */
7309struct task_group *sched_create_group(struct task_group *parent)
7310{
7311	struct task_group *tg;
7312
7313	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
7314	if (!tg)
7315		return ERR_PTR(-ENOMEM);
7316
7317	if (!alloc_fair_sched_group(tg, parent))
7318		goto err;
7319
7320	if (!alloc_rt_sched_group(tg, parent))
7321		goto err;
7322
7323	return tg;
7324
7325err:
7326	free_sched_group(tg);
7327	return ERR_PTR(-ENOMEM);
7328}
7329
7330void sched_online_group(struct task_group *tg, struct task_group *parent)
7331{
7332	unsigned long flags;
7333
7334	spin_lock_irqsave(&task_group_lock, flags);
7335	list_add_rcu(&tg->list, &task_groups);
7336
7337	WARN_ON(!parent); /* root should already exist */
7338
7339	tg->parent = parent;
7340	INIT_LIST_HEAD(&tg->children);
7341	list_add_rcu(&tg->siblings, &parent->children);
7342	spin_unlock_irqrestore(&task_group_lock, flags);
7343}
7344
7345/* rcu callback to free various structures associated with a task group */
7346static void free_sched_group_rcu(struct rcu_head *rhp)
7347{
7348	/* now it should be safe to free those cfs_rqs */
7349	free_sched_group(container_of(rhp, struct task_group, rcu));
7350}
7351
7352/* Destroy runqueue etc associated with a task group */
7353void sched_destroy_group(struct task_group *tg)
7354{
7355	/* wait for possible concurrent references to cfs_rqs to complete */
7356	call_rcu(&tg->rcu, free_sched_group_rcu);
7357}
7358
7359void sched_offline_group(struct task_group *tg)
7360{
7361	unsigned long flags;
7362	int i;
7363
7364	/* end participation in shares distribution */
7365	for_each_possible_cpu(i)
7366		unregister_fair_sched_group(tg, i);
7367
7368	spin_lock_irqsave(&task_group_lock, flags);
7369	list_del_rcu(&tg->list);
7370	list_del_rcu(&tg->siblings);
7371	spin_unlock_irqrestore(&task_group_lock, flags);
7372}
7373
7374/* Change a task's runqueue when it moves between groups.
7375 * The caller of this function should have put the task in its new group
7376 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
7377 * reflect its new group.
7378 */
7379void sched_move_task(struct task_struct *tsk)
7380{
7381	struct task_group *tg;
7382	int queued, running;
7383	unsigned long flags;
7384	struct rq *rq;
7385
7386	rq = task_rq_lock(tsk, &flags);
7387
7388	running = task_current(rq, tsk);
7389	queued = task_on_rq_queued(tsk);
7390
7391	if (queued)
7392		dequeue_task(rq, tsk, 0);
7393	if (unlikely(running))
7394		put_prev_task(rq, tsk);
7395
7396	tg = container_of(task_css_check(tsk, cpu_cgrp_id,
7397				lockdep_is_held(&tsk->sighand->siglock)),
7398			  struct task_group, css);
7399	tg = autogroup_task_group(tsk, tg);
7400	tsk->sched_task_group = tg;
7401
7402#ifdef CONFIG_FAIR_GROUP_SCHED
7403	if (tsk->sched_class->task_move_group)
7404		tsk->sched_class->task_move_group(tsk, queued);
7405	else
7406#endif
7407		set_task_rq(tsk, task_cpu(tsk));
7408
7409	if (unlikely(running))
7410		tsk->sched_class->set_curr_task(rq);
7411	if (queued)
7412		enqueue_task(rq, tsk, 0);
7413
7414	task_rq_unlock(rq, tsk, &flags);
7415}
7416#endif /* CONFIG_CGROUP_SCHED */
7417
7418#ifdef CONFIG_RT_GROUP_SCHED
7419/*
7420 * Ensure that the real time constraints are schedulable.
7421 */
7422static DEFINE_MUTEX(rt_constraints_mutex);
7423
7424/* Must be called with tasklist_lock held */
7425static inline int tg_has_rt_tasks(struct task_group *tg)
7426{
7427	struct task_struct *g, *p;
7428
7429	for_each_process_thread(g, p) {
7430		if (rt_task(p) && task_rq(p)->rt.tg == tg)
7431			return 1;
7432	}
7433
7434	return 0;
7435}
7436
7437struct rt_schedulable_data {
7438	struct task_group *tg;
7439	u64 rt_period;
7440	u64 rt_runtime;
7441};
7442
7443static int tg_rt_schedulable(struct task_group *tg, void *data)
7444{
7445	struct rt_schedulable_data *d = data;
7446	struct task_group *child;
7447	unsigned long total, sum = 0;
7448	u64 period, runtime;
7449
7450	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7451	runtime = tg->rt_bandwidth.rt_runtime;
7452
7453	if (tg == d->tg) {
7454		period = d->rt_period;
7455		runtime = d->rt_runtime;
7456	}
7457
7458	/*
7459	 * Cannot have more runtime than the period.
7460	 */
7461	if (runtime > period && runtime != RUNTIME_INF)
7462		return -EINVAL;
7463
7464	/*
7465	 * Ensure we don't starve existing RT tasks.
7466	 */
7467	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
7468		return -EBUSY;
7469
7470	total = to_ratio(period, runtime);
7471
7472	/*
7473	 * Nobody can have more than the global setting allows.
7474	 */
7475	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
7476		return -EINVAL;
7477
7478	/*
7479	 * The sum of our children's runtime should not exceed our own.
7480	 */
7481	list_for_each_entry_rcu(child, &tg->children, siblings) {
7482		period = ktime_to_ns(child->rt_bandwidth.rt_period);
7483		runtime = child->rt_bandwidth.rt_runtime;
7484
7485		if (child == d->tg) {
7486			period = d->rt_period;
7487			runtime = d->rt_runtime;
7488		}
7489
7490		sum += to_ratio(period, runtime);
7491	}
7492
7493	if (sum > total)
7494		return -EINVAL;
7495
7496	return 0;
7497}
7498
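/*
 * Worked example, assuming the default global limits of a 1s period
 * and 0.95s runtime: a group requesting runtime == period would need
 * 100% of a CPU, exceed to_ratio(global_rt_period(),
 * global_rt_runtime()) and fail the global check above with -EINVAL;
 * likewise, two children each granted 60% of the bandwidth would sum
 * to 120% and fail the children check against their parent.
 */
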
7499static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
7500{
7501	int ret;
7502
7503	struct rt_schedulable_data data = {
7504		.tg = tg,
7505		.rt_period = period,
7506		.rt_runtime = runtime,
7507	};
7508
7509	rcu_read_lock();
7510	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
7511	rcu_read_unlock();
7512
7513	return ret;
7514}
7515
7516static int tg_set_rt_bandwidth(struct task_group *tg,
7517		u64 rt_period, u64 rt_runtime)
7518{
7519	int i, err = 0;
7520
7521	mutex_lock(&rt_constraints_mutex);
7522	read_lock(&tasklist_lock);
7523	err = __rt_schedulable(tg, rt_period, rt_runtime);
7524	if (err)
7525		goto unlock;
7526
7527	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
7528	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
7529	tg->rt_bandwidth.rt_runtime = rt_runtime;
7530
7531	for_each_possible_cpu(i) {
7532		struct rt_rq *rt_rq = tg->rt_rq[i];
7533
7534		raw_spin_lock(&rt_rq->rt_runtime_lock);
7535		rt_rq->rt_runtime = rt_runtime;
7536		raw_spin_unlock(&rt_rq->rt_runtime_lock);
7537	}
7538	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
7539unlock:
7540	read_unlock(&tasklist_lock);
7541	mutex_unlock(&rt_constraints_mutex);
7542
7543	return err;
7544}
7545
7546static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
7547{
7548	u64 rt_runtime, rt_period;
7549
7550	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7551	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
7552	if (rt_runtime_us < 0)
7553		rt_runtime = RUNTIME_INF;
7554
7555	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
7556}
7557
7558static long sched_group_rt_runtime(struct task_group *tg)
7559{
7560	u64 rt_runtime_us;
7561
7562	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
7563		return -1;
7564
7565	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
7566	do_div(rt_runtime_us, NSEC_PER_USEC);
7567	return rt_runtime_us;
7568}
7569
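/*
 * These helpers back the cpu.rt_runtime_us cgroup file declared near
 * the end of this file; for example (the cgroup path is illustrative):
 *
 *	# echo 300000 > /sys/fs/cgroup/cpu/grp/cpu.rt_runtime_us
 *
 * grants 'grp' 300ms of RT runtime per period, while writing a
 * negative value restores RUNTIME_INF, which reads back as -1.
 */
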
7570static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
7571{
7572	u64 rt_runtime, rt_period;
7573
7574	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
7575	rt_runtime = tg->rt_bandwidth.rt_runtime;
7576
7577	if (rt_period == 0)
7578		return -EINVAL;
7579
7580	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
7581}
7582
7583static long sched_group_rt_period(struct task_group *tg)
7584{
7585	u64 rt_period_us;
7586
7587	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
7588	do_div(rt_period_us, NSEC_PER_USEC);
7589	return rt_period_us;
7590}
7591#endif /* CONFIG_RT_GROUP_SCHED */
7592
7593#ifdef CONFIG_RT_GROUP_SCHED
7594static int sched_rt_global_constraints(void)
7595{
7596	int ret = 0;
7597
7598	mutex_lock(&rt_constraints_mutex);
7599	read_lock(&tasklist_lock);
7600	ret = __rt_schedulable(NULL, 0, 0);
7601	read_unlock(&tasklist_lock);
7602	mutex_unlock(&rt_constraints_mutex);
7603
7604	return ret;
7605}
7606
7607static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
7608{
7609	/* Don't accept realtime tasks when there is no way for them to run */
7610	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
7611		return 0;
7612
7613	return 1;
7614}
7615
7616#else /* !CONFIG_RT_GROUP_SCHED */
7617static int sched_rt_global_constraints(void)
7618{
7619	unsigned long flags;
7620	int i, ret = 0;
7621
7622	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
7623	for_each_possible_cpu(i) {
7624		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
7625
7626		raw_spin_lock(&rt_rq->rt_runtime_lock);
7627		rt_rq->rt_runtime = global_rt_runtime();
7628		raw_spin_unlock(&rt_rq->rt_runtime_lock);
7629	}
7630	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
7631
7632	return ret;
7633}
7634#endif /* CONFIG_RT_GROUP_SCHED */
7635
7636static int sched_dl_global_constraints(void)
7637{
7638	u64 runtime = global_rt_runtime();
7639	u64 period = global_rt_period();
7640	u64 new_bw = to_ratio(period, runtime);
7641	int cpu, ret = 0;
7642	unsigned long flags;
7643
7644	/*
7645	 * Here we want to check that the bandwidth is not being set to a
7646	 * value smaller than the currently allocated bandwidth in
7647	 * any of the root_domains.
7648	 *
7649	 * FIXME: Cycling over all the CPUs is overkill, but simpler than
7650	 * cycling on root_domains... Discussion on different/better
7651	 * solutions is welcome!
7652	 */
7653	for_each_possible_cpu(cpu) {
7654		struct dl_bw *dl_b = dl_bw_of(cpu);
7655
7656		raw_spin_lock_irqsave(&dl_b->lock, flags);
7657		if (new_bw < dl_b->total_bw)
7658			ret = -EBUSY;
7659		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
7660
7661		if (ret)
7662			break;
7663	}
7664
7665	return ret;
7666}
7667
7668static void sched_dl_do_global(void)
7669{
7670	u64 new_bw = -1;
7671	int cpu;
7672	unsigned long flags;
7673
7674	def_dl_bandwidth.dl_period = global_rt_period();
7675	def_dl_bandwidth.dl_runtime = global_rt_runtime();
7676
7677	if (global_rt_runtime() != RUNTIME_INF)
7678		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
7679
7680	/*
7681	 * FIXME: As above...
7682	 */
7683	for_each_possible_cpu(cpu) {
7684		struct dl_bw *dl_b = dl_bw_of(cpu);
7685
7686		raw_spin_lock_irqsave(&dl_b->lock, flags);
7687		dl_b->bw = new_bw;
7688		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
7689	}
7690}
7691
7692static int sched_rt_global_validate(void)
7693{
7694	if (sysctl_sched_rt_period <= 0)
7695		return -EINVAL;
7696
7697	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
7698		(sysctl_sched_rt_runtime > sysctl_sched_rt_period))
7699		return -EINVAL;
7700
7701	return 0;
7702}
7703
7704static void sched_rt_do_global(void)
7705{
7706	def_rt_bandwidth.rt_runtime = global_rt_runtime();
7707	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
7708}
7709
7710int sched_rt_handler(struct ctl_table *table, int write,
7711		void __user *buffer, size_t *lenp,
7712		loff_t *ppos)
7713{
7714	int old_period, old_runtime;
7715	static DEFINE_MUTEX(mutex);
7716	int ret;
7717
7718	mutex_lock(&mutex);
7719	old_period = sysctl_sched_rt_period;
7720	old_runtime = sysctl_sched_rt_runtime;
7721
7722	ret = proc_dointvec(table, write, buffer, lenp, ppos);
7723
7724	if (!ret && write) {
7725		ret = sched_rt_global_validate();
7726		if (ret)
7727			goto undo;
7728
7729		ret = sched_rt_global_constraints();
7730		if (ret)
7731			goto undo;
7732
7733		ret = sched_dl_global_constraints();
7734		if (ret)
7735			goto undo;
7736
7737		sched_rt_do_global();
7738		sched_dl_do_global();
7739	}
7740	if (0) {
7741undo:
7742		sysctl_sched_rt_period = old_period;
7743		sysctl_sched_rt_runtime = old_runtime;
7744	}
7745	mutex_unlock(&mutex);
7746
7747	return ret;
7748}
7749
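/*
 * This handler serves the kernel.sched_rt_period_us and
 * kernel.sched_rt_runtime_us sysctls; for example:
 *
 *	# echo 900000 > /proc/sys/kernel/sched_rt_runtime_us
 *
 * lowers the global RT budget to 0.9s out of every 1s period, provided
 * the validate and constraint checks above all pass.
 */
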
7750int sched_rr_handler(struct ctl_table *table, int write,
7751		void __user *buffer, size_t *lenp,
7752		loff_t *ppos)
7753{
7754	int ret;
7755	static DEFINE_MUTEX(mutex);
7756
7757	mutex_lock(&mutex);
7758	ret = proc_dointvec(table, write, buffer, lenp, ppos);
7759	/* make sure that internally we keep jiffies */
7760	/* also, writing zero resets timeslice to default */
7761	if (!ret && write) {
7762		sched_rr_timeslice = sched_rr_timeslice <= 0 ?
7763			RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
7764	}
7765	mutex_unlock(&mutex);
7766	return ret;
7767}
7768
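/*
 * This handler serves kernel.sched_rr_timeslice_ms; the value is
 * written in milliseconds and kept internally in jiffies, e.g.
 *
 *	# echo 0 > /proc/sys/kernel/sched_rr_timeslice_ms
 *
 * resets the SCHED_RR timeslice to the RR_TIMESLICE default.
 */
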
7769#ifdef CONFIG_CGROUP_SCHED
7770
7771static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
7772{
7773	return css ? container_of(css, struct task_group, css) : NULL;
7774}
7775
7776static struct cgroup_subsys_state *
7777cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
7778{
7779	struct task_group *parent = css_tg(parent_css);
7780	struct task_group *tg;
7781
7782	if (!parent) {
7783		/* This is early initialization for the top cgroup */
7784		return &root_task_group.css;
7785	}
7786
7787	tg = sched_create_group(parent);
7788	if (IS_ERR(tg))
7789		return ERR_PTR(-ENOMEM);
7790
7791	return &tg->css;
7792}
7793
7794static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
7795{
7796	struct task_group *tg = css_tg(css);
7797	struct task_group *parent = css_tg(css->parent);
7798
7799	if (parent)
7800		sched_online_group(tg, parent);
7801	return 0;
7802}
7803
7804static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
7805{
7806	struct task_group *tg = css_tg(css);
7807
7808	sched_destroy_group(tg);
7809}
7810
7811static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
7812{
7813	struct task_group *tg = css_tg(css);
7814
7815	sched_offline_group(tg);
7816}
7817
7818static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
7819				 struct cgroup_taskset *tset)
7820{
7821	struct task_struct *task;
7822
7823	cgroup_taskset_for_each(task, tset) {
7824#ifdef CONFIG_RT_GROUP_SCHED
7825		if (!sched_rt_can_attach(css_tg(css), task))
7826			return -EINVAL;
7827#else
7828		/* We don't support RT-tasks being in separate groups */
7829		if (task->sched_class != &fair_sched_class)
7830			return -EINVAL;
7831#endif
7832	}
7833	return 0;
7834}
7835
7836static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
7837			      struct cgroup_taskset *tset)
7838{
7839	struct task_struct *task;
7840
7841	cgroup_taskset_for_each(task, tset)
7842		sched_move_task(task);
7843}
7844
7845static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
7846			    struct cgroup_subsys_state *old_css,
7847			    struct task_struct *task)
7848{
7849	/*
7850	 * cgroup_exit() is called in the copy_process() failure path.
7851	 * Ignore this case since the task hasn't run yet; this avoids
7852	 * trying to poke a half-freed task state from generic code.
7853	 */
7854	if (!(task->flags & PF_EXITING))
7855		return;
7856
7857	sched_move_task(task);
7858}
7859
7860#ifdef CONFIG_FAIR_GROUP_SCHED
7861static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
7862				struct cftype *cftype, u64 shareval)
7863{
7864	return sched_group_set_shares(css_tg(css), scale_load(shareval));
7865}
7866
7867static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
7868			       struct cftype *cft)
7869{
7870	struct task_group *tg = css_tg(css);
7871
7872	return (u64) scale_load_down(tg->shares);
7873}
7874
7875#ifdef CONFIG_CFS_BANDWIDTH
7876static DEFINE_MUTEX(cfs_constraints_mutex);
7877
7878const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
7879const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
7880
7881static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
7882
7883static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
7884{
7885	int i, ret = 0, runtime_enabled, runtime_was_enabled;
7886	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
7887
7888	if (tg == &root_task_group)
7889		return -EINVAL;
7890
7891	/*
7892	 * Ensure we have some amount of bandwidth every period.  This is
7893	 * to prevent reaching a state of large arrears when throttled via
7894	 * entity_tick() resulting in prolonged exit starvation.
7895	 */
7896	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
7897		return -EINVAL;
7898
7899	/*
7900	 * Likewise, bound things on the other side by preventing insane quota
7901	 * periods.  This also allows us to normalize in computing quota
7902	 * feasibility.
7903	 */
7904	if (period > max_cfs_quota_period)
7905		return -EINVAL;
7906
7907	/*
7908	 * Prevent race between setting of cfs_rq->runtime_enabled and
7909	 * unthrottle_offline_cfs_rqs().
7910	 */
7911	get_online_cpus();
7912	mutex_lock(&cfs_constraints_mutex);
7913	ret = __cfs_schedulable(tg, period, quota);
7914	if (ret)
7915		goto out_unlock;
7916
7917	runtime_enabled = quota != RUNTIME_INF;
7918	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
7919	/*
7920	 * If we need to toggle cfs_bandwidth_used, off->on must occur
7921	 * before making related changes, and on->off must occur afterwards
7922	 */
7923	if (runtime_enabled && !runtime_was_enabled)
7924		cfs_bandwidth_usage_inc();
7925	raw_spin_lock_irq(&cfs_b->lock);
7926	cfs_b->period = ns_to_ktime(period);
7927	cfs_b->quota = quota;
7928
7929	__refill_cfs_bandwidth_runtime(cfs_b);
7930	/* restart the period timer (if active) to handle new period expiry */
7931	if (runtime_enabled && cfs_b->timer_active) {
7932		/* force a reprogram */
7933		__start_cfs_bandwidth(cfs_b, true);
7934	}
7935	raw_spin_unlock_irq(&cfs_b->lock);
7936
7937	for_each_online_cpu(i) {
7938		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
7939		struct rq *rq = cfs_rq->rq;
7940
7941		raw_spin_lock_irq(&rq->lock);
7942		cfs_rq->runtime_enabled = runtime_enabled;
7943		cfs_rq->runtime_remaining = 0;
7944
7945		if (cfs_rq->throttled)
7946			unthrottle_cfs_rq(cfs_rq);
7947		raw_spin_unlock_irq(&rq->lock);
7948	}
7949	if (runtime_was_enabled && !runtime_enabled)
7950		cfs_bandwidth_usage_dec();
7951out_unlock:
7952	mutex_unlock(&cfs_constraints_mutex);
7953	put_online_cpus();
7954
7955	return ret;
7956}
7957
7958int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
7959{
7960	u64 quota, period;
7961
7962	period = ktime_to_ns(tg->cfs_bandwidth.period);
7963	if (cfs_quota_us < 0)
7964		quota = RUNTIME_INF;
7965	else
7966		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
7967
7968	return tg_set_cfs_bandwidth(tg, period, quota);
7969}
7970
7971long tg_get_cfs_quota(struct task_group *tg)
7972{
7973	u64 quota_us;
7974
7975	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
7976		return -1;
7977
7978	quota_us = tg->cfs_bandwidth.quota;
7979	do_div(quota_us, NSEC_PER_USEC);
7980
7981	return quota_us;
7982}
7983
7984int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
7985{
7986	u64 quota, period;
7987
7988	period = (u64)cfs_period_us * NSEC_PER_USEC;
7989	quota = tg->cfs_bandwidth.quota;
7990
7991	return tg_set_cfs_bandwidth(tg, period, quota);
7992}
7993
7994long tg_get_cfs_period(struct task_group *tg)
7995{
7996	u64 cfs_period_us;
7997
7998	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
7999	do_div(cfs_period_us, NSEC_PER_USEC);
8000
8001	return cfs_period_us;
8002}
8003
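/*
 * Example of the pair in action via the cgroup files declared below
 * (the cgroup path is illustrative):
 *
 *	# echo 100000 > /sys/fs/cgroup/cpu/grp/cpu.cfs_period_us
 *	# echo 50000  > /sys/fs/cgroup/cpu/grp/cpu.cfs_quota_us
 *
 * caps 'grp' at half a CPU (50ms of runtime every 100ms); writing a
 * negative quota restores RUNTIME_INF, i.e. no limit.
 */
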
8004static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
8005				  struct cftype *cft)
8006{
8007	return tg_get_cfs_quota(css_tg(css));
8008}
8009
8010static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
8011				   struct cftype *cftype, s64 cfs_quota_us)
8012{
8013	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
8014}
8015
8016static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
8017				   struct cftype *cft)
8018{
8019	return tg_get_cfs_period(css_tg(css));
8020}
8021
8022static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
8023				    struct cftype *cftype, u64 cfs_period_us)
8024{
8025	return tg_set_cfs_period(css_tg(css), cfs_period_us);
8026}
8027
8028struct cfs_schedulable_data {
8029	struct task_group *tg;
8030	u64 period, quota;
8031};
8032
8033/*
8034 * normalize group quota/period to be quota/max_period
8035 * note: units are usecs
8036 */
8037static u64 normalize_cfs_quota(struct task_group *tg,
8038			       struct cfs_schedulable_data *d)
8039{
8040	u64 quota, period;
8041
8042	if (tg == d->tg) {
8043		period = d->period;
8044		quota = d->quota;
8045	} else {
8046		period = tg_get_cfs_period(tg);
8047		quota = tg_get_cfs_quota(tg);
8048	}
8049
8050	/* note: these should typically be equivalent */
8051	if (quota == RUNTIME_INF || quota == -1)
8052		return RUNTIME_INF;
8053
8054	return to_ratio(period, quota);
8055}
8056
8057static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
8058{
8059	struct cfs_schedulable_data *d = data;
8060	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8061	s64 quota = 0, parent_quota = -1;
8062
8063	if (!tg->parent) {
8064		quota = RUNTIME_INF;
8065	} else {
8066		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
8067
8068		quota = normalize_cfs_quota(tg, d);
8069		parent_quota = parent_b->hierarchal_quota;
8070
8071		/*
8072		 * ensure max(child_quota) <= parent_quota, inherit when no
8073		 * limit is set
8074		 */
8075		if (quota == RUNTIME_INF)
8076			quota = parent_quota;
8077		else if (parent_quota != RUNTIME_INF && quota > parent_quota)
8078			return -EINVAL;
8079	}
8080	cfs_b->hierarchal_quota = quota;
8081
8082	return 0;
8083}
8084
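/*
 * Example: a parent capped at 50ms/100ms normalizes to a ratio of 0.5;
 * a child asking for 80ms/100ms (0.8) trips the quota > parent_quota
 * check above, so the originating tg_set_cfs_bandwidth() call fails
 * with -EINVAL, while a child with no limit simply inherits 0.5.
 */
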
8085static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
8086{
8087	int ret;
8088	struct cfs_schedulable_data data = {
8089		.tg = tg,
8090		.period = period,
8091		.quota = quota,
8092	};
8093
8094	if (quota != RUNTIME_INF) {
8095		do_div(data.period, NSEC_PER_USEC);
8096		do_div(data.quota, NSEC_PER_USEC);
8097	}
8098
8099	rcu_read_lock();
8100	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
8101	rcu_read_unlock();
8102
8103	return ret;
8104}
8105
8106static int cpu_stats_show(struct seq_file *sf, void *v)
8107{
8108	struct task_group *tg = css_tg(seq_css(sf));
8109	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8110
8111	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
8112	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
8113	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
8114
8115	return 0;
8116}
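/*
 * Reading the resulting cpu.stat file produces three lines, e.g.
 *
 *	nr_periods 1023
 *	nr_throttled 17
 *	throttled_time 104857600
 *
 * i.e. the number of elapsed enforcement periods, how many of them
 * throttled the group, and the cumulative throttled time in
 * nanoseconds (the numbers shown are illustrative).
 */
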
8117#endif /* CONFIG_CFS_BANDWIDTH */
8118#endif /* CONFIG_FAIR_GROUP_SCHED */
8119
8120#ifdef CONFIG_RT_GROUP_SCHED
8121static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
8122				struct cftype *cft, s64 val)
8123{
8124	return sched_group_set_rt_runtime(css_tg(css), val);
8125}
8126
8127static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
8128			       struct cftype *cft)
8129{
8130	return sched_group_rt_runtime(css_tg(css));
8131}
8132
8133static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
8134				    struct cftype *cftype, u64 rt_period_us)
8135{
8136	return sched_group_set_rt_period(css_tg(css), rt_period_us);
8137}
8138
8139static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
8140				   struct cftype *cft)
8141{
8142	return sched_group_rt_period(css_tg(css));
8143}
8144#endif /* CONFIG_RT_GROUP_SCHED */
8145
8146static struct cftype cpu_files[] = {
8147#ifdef CONFIG_FAIR_GROUP_SCHED
8148	{
8149		.name = "shares",
8150		.read_u64 = cpu_shares_read_u64,
8151		.write_u64 = cpu_shares_write_u64,
8152	},
8153#endif
8154#ifdef CONFIG_CFS_BANDWIDTH
8155	{
8156		.name = "cfs_quota_us",
8157		.read_s64 = cpu_cfs_quota_read_s64,
8158		.write_s64 = cpu_cfs_quota_write_s64,
8159	},
8160	{
8161		.name = "cfs_period_us",
8162		.read_u64 = cpu_cfs_period_read_u64,
8163		.write_u64 = cpu_cfs_period_write_u64,
8164	},
8165	{
8166		.name = "stat",
8167		.seq_show = cpu_stats_show,
8168	},
8169#endif
8170#ifdef CONFIG_RT_GROUP_SCHED
8171	{
8172		.name = "rt_runtime_us",
8173		.read_s64 = cpu_rt_runtime_read,
8174		.write_s64 = cpu_rt_runtime_write,
8175	},
8176	{
8177		.name = "rt_period_us",
8178		.read_u64 = cpu_rt_period_read_uint,
8179		.write_u64 = cpu_rt_period_write_uint,
8180	},
8181#endif
8182	{ }	/* terminate */
8183};
8184
8185struct cgroup_subsys cpu_cgrp_subsys = {
8186	.css_alloc	= cpu_cgroup_css_alloc,
8187	.css_free	= cpu_cgroup_css_free,
8188	.css_online	= cpu_cgroup_css_online,
8189	.css_offline	= cpu_cgroup_css_offline,
8190	.can_attach	= cpu_cgroup_can_attach,
8191	.attach		= cpu_cgroup_attach,
8192	.exit		= cpu_cgroup_exit,
8193	.legacy_cftypes	= cpu_files,
8194	.early_init	= 1,
8195};
8196
8197#endif	/* CONFIG_CGROUP_SCHED */
8198
8199void dump_cpu_task(int cpu)
8200{
8201	pr_info("Task dump for CPU %d:\n", cpu);
8202	sched_show_task(cpu_curr(cpu));
8203}
8204