core.c revision b8a216269ec0ce2e961d32e6d640d7010b8a818e
1/*
2 *  kernel/sched/core.c
3 *
4 *  Kernel scheduler and related syscalls
5 *
6 *  Copyright (C) 1991-2002  Linus Torvalds
7 *
8 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
9 *		make semaphores SMP safe
10 *  1998-11-19	Implemented schedule_timeout() and related stuff
11 *		by Andrea Arcangeli
12 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
13 *		hybrid priority-list and round-robin design with
14 *		an array-switch method of distributing timeslices
15 *		and per-CPU runqueues.  Cleanups and useful suggestions
16 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
17 *  2003-09-03	Interactivity tuning by Con Kolivas.
18 *  2004-04-02	Scheduler domains code by Nick Piggin
19 *  2007-04-15  Work begun on replacing all interactivity tuning with a
20 *              fair scheduling design by Con Kolivas.
21 *  2007-05-05  Load balancing (smp-nice) and other improvements
22 *              by Peter Williams
23 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
24 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
25 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 *              Thomas Gleixner, Mike Kravetz
27 */
28
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/nmi.h>
32#include <linux/init.h>
33#include <linux/uaccess.h>
34#include <linux/highmem.h>
35#include <asm/mmu_context.h>
36#include <linux/interrupt.h>
37#include <linux/capability.h>
38#include <linux/completion.h>
39#include <linux/kernel_stat.h>
40#include <linux/debug_locks.h>
41#include <linux/perf_event.h>
42#include <linux/security.h>
43#include <linux/notifier.h>
44#include <linux/profile.h>
45#include <linux/freezer.h>
46#include <linux/vmalloc.h>
47#include <linux/blkdev.h>
48#include <linux/delay.h>
49#include <linux/pid_namespace.h>
50#include <linux/smp.h>
51#include <linux/threads.h>
52#include <linux/timer.h>
53#include <linux/rcupdate.h>
54#include <linux/cpu.h>
55#include <linux/cpuset.h>
56#include <linux/percpu.h>
57#include <linux/proc_fs.h>
58#include <linux/seq_file.h>
59#include <linux/sysctl.h>
60#include <linux/syscalls.h>
61#include <linux/times.h>
62#include <linux/tsacct_kern.h>
63#include <linux/kprobes.h>
64#include <linux/delayacct.h>
65#include <linux/unistd.h>
66#include <linux/pagemap.h>
67#include <linux/hrtimer.h>
68#include <linux/tick.h>
69#include <linux/debugfs.h>
70#include <linux/ctype.h>
71#include <linux/ftrace.h>
72#include <linux/slab.h>
73#include <linux/init_task.h>
74#include <linux/binfmts.h>
75#include <linux/context_tracking.h>
76
77#include <asm/switch_to.h>
78#include <asm/tlb.h>
79#include <asm/irq_regs.h>
80#include <asm/mutex.h>
81#ifdef CONFIG_PARAVIRT
82#include <asm/paravirt.h>
83#endif
84
85#include "sched.h"
86#include "../workqueue_internal.h"
87#include "../smpboot.h"
88
89#define CREATE_TRACE_POINTS
90#include <trace/events/sched.h>
91
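/*
 * Ensure @period_timer is queued: forward it by whole @periods past the
 * current time and (re)start it, retrying until the timer is seen active.
 */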
92void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
93{
94	unsigned long delta;
95	ktime_t soft, hard, now;
96
97	for (;;) {
98		if (hrtimer_active(period_timer))
99			break;
100
101		now = hrtimer_cb_get_time(period_timer);
102		hrtimer_forward(period_timer, now, period);
103
104		soft = hrtimer_get_softexpires(period_timer);
105		hard = hrtimer_get_expires(period_timer);
106		delta = ktime_to_ns(ktime_sub(hard, soft));
107		__hrtimer_start_range_ns(period_timer, soft, delta,
108					 HRTIMER_MODE_ABS_PINNED, 0);
109	}
110}
111
112DEFINE_MUTEX(sched_domains_mutex);
113DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
114
115static void update_rq_clock_task(struct rq *rq, s64 delta);
116
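/*
 * Advance rq->clock by the time elapsed since the last update, unless a
 * pending skip was requested via rq->skip_clock_update.
 */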
117void update_rq_clock(struct rq *rq)
118{
119	s64 delta;
120
121	if (rq->skip_clock_update > 0)
122		return;
123
124	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
125	rq->clock += delta;
126	update_rq_clock_task(rq, delta);
127}
128
129/*
130 * Debugging: various feature bits
131 */
132
133#define SCHED_FEAT(name, enabled)	\
134	(1UL << __SCHED_FEAT_##name) * enabled |
135
136const_debug unsigned int sysctl_sched_features =
137#include "features.h"
138	0;
139
140#undef SCHED_FEAT
141
142#ifdef CONFIG_SCHED_DEBUG
143#define SCHED_FEAT(name, enabled)	\
144	#name ,
145
146static const char * const sched_feat_names[] = {
147#include "features.h"
148};
149
150#undef SCHED_FEAT
151
152static int sched_feat_show(struct seq_file *m, void *v)
153{
154	int i;
155
156	for (i = 0; i < __SCHED_FEAT_NR; i++) {
157		if (!(sysctl_sched_features & (1UL << i)))
158			seq_puts(m, "NO_");
159		seq_printf(m, "%s ", sched_feat_names[i]);
160	}
161	seq_puts(m, "\n");
162
163	return 0;
164}
165
166#ifdef HAVE_JUMP_LABEL
167
168#define jump_label_key__true  STATIC_KEY_INIT_TRUE
169#define jump_label_key__false STATIC_KEY_INIT_FALSE
170
171#define SCHED_FEAT(name, enabled)	\
172	jump_label_key__##enabled ,
173
174struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
175#include "features.h"
176};
177
178#undef SCHED_FEAT
179
180static void sched_feat_disable(int i)
181{
182	if (static_key_enabled(&sched_feat_keys[i]))
183		static_key_slow_dec(&sched_feat_keys[i]);
184}
185
186static void sched_feat_enable(int i)
187{
188	if (!static_key_enabled(&sched_feat_keys[i]))
189		static_key_slow_inc(&sched_feat_keys[i]);
190}
191#else
192static void sched_feat_disable(int i) { };
193static void sched_feat_enable(int i) { };
194#endif /* HAVE_JUMP_LABEL */
195
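/*
 * Enable the named scheduler feature, or disable it when the name carries a
 * "NO_" prefix. Returns the index of the matched feature, or __SCHED_FEAT_NR
 * if @cmp did not name a known feature.
 */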
196static int sched_feat_set(char *cmp)
197{
198	int i;
199	int neg = 0;
200
201	if (strncmp(cmp, "NO_", 3) == 0) {
202		neg = 1;
203		cmp += 3;
204	}
205
206	for (i = 0; i < __SCHED_FEAT_NR; i++) {
207		if (strcmp(cmp, sched_feat_names[i]) == 0) {
208			if (neg) {
209				sysctl_sched_features &= ~(1UL << i);
210				sched_feat_disable(i);
211			} else {
212				sysctl_sched_features |= (1UL << i);
213				sched_feat_enable(i);
214			}
215			break;
216		}
217	}
218
219	return i;
220}
221
222static ssize_t
223sched_feat_write(struct file *filp, const char __user *ubuf,
224		size_t cnt, loff_t *ppos)
225{
226	char buf[64];
227	char *cmp;
228	int i;
229
230	if (cnt > 63)
231		cnt = 63;
232
233	if (copy_from_user(&buf, ubuf, cnt))
234		return -EFAULT;
235
236	buf[cnt] = 0;
237	cmp = strstrip(buf);
238
239	i = sched_feat_set(cmp);
240	if (i == __SCHED_FEAT_NR)
241		return -EINVAL;
242
243	*ppos += cnt;
244
245	return cnt;
246}
247
248static int sched_feat_open(struct inode *inode, struct file *filp)
249{
250	return single_open(filp, sched_feat_show, NULL);
251}
252
253static const struct file_operations sched_feat_fops = {
254	.open		= sched_feat_open,
255	.write		= sched_feat_write,
256	.read		= seq_read,
257	.llseek		= seq_lseek,
258	.release	= single_release,
259};
260
261static __init int sched_init_debug(void)
262{
263	debugfs_create_file("sched_features", 0644, NULL, NULL,
264			&sched_feat_fops);
265
266	return 0;
267}
268late_initcall(sched_init_debug);
269#endif /* CONFIG_SCHED_DEBUG */
270
271/*
272 * Number of tasks to iterate in a single balance run.
273 * Limited because this is done with IRQs disabled.
274 */
275const_debug unsigned int sysctl_sched_nr_migrate = 32;
276
277/*
278 * period over which we average the RT time consumption, measured
279 * in ms.
280 *
281 * default: 1s
282 */
283const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
284
285/*
286 * period over which we measure -rt task cpu usage in us.
287 * default: 1s
288 */
289unsigned int sysctl_sched_rt_period = 1000000;
290
291__read_mostly int scheduler_running;
292
293/*
294 * part of the period that we allow rt tasks to run in us.
295 * default: 0.95s
296 */
297int sysctl_sched_rt_runtime = 950000;
298
299
300
301/*
302 * __task_rq_lock - lock the rq @p resides on.
303 */
304static inline struct rq *__task_rq_lock(struct task_struct *p)
305	__acquires(rq->lock)
306{
307	struct rq *rq;
308
309	lockdep_assert_held(&p->pi_lock);
310
311	for (;;) {
312		rq = task_rq(p);
313		raw_spin_lock(&rq->lock);
314		if (likely(rq == task_rq(p)))
315			return rq;
316		raw_spin_unlock(&rq->lock);
317	}
318}
319
320/*
321 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
322 */
323static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
324	__acquires(p->pi_lock)
325	__acquires(rq->lock)
326{
327	struct rq *rq;
328
329	for (;;) {
330		raw_spin_lock_irqsave(&p->pi_lock, *flags);
331		rq = task_rq(p);
332		raw_spin_lock(&rq->lock);
333		if (likely(rq == task_rq(p)))
334			return rq;
335		raw_spin_unlock(&rq->lock);
336		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
337	}
338}
339
340static void __task_rq_unlock(struct rq *rq)
341	__releases(rq->lock)
342{
343	raw_spin_unlock(&rq->lock);
344}
345
346static inline void
347task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
348	__releases(rq->lock)
349	__releases(p->pi_lock)
350{
351	raw_spin_unlock(&rq->lock);
352	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
353}
354
355/*
356 * this_rq_lock - lock this runqueue and disable interrupts.
357 */
358static struct rq *this_rq_lock(void)
359	__acquires(rq->lock)
360{
361	struct rq *rq;
362
363	local_irq_disable();
364	rq = this_rq();
365	raw_spin_lock(&rq->lock);
366
367	return rq;
368}
369
370#ifdef CONFIG_SCHED_HRTICK
371/*
372 * Use HR-timers to deliver accurate preemption points.
373 */
374
375static void hrtick_clear(struct rq *rq)
376{
377	if (hrtimer_active(&rq->hrtick_timer))
378		hrtimer_cancel(&rq->hrtick_timer);
379}
380
381/*
382 * High-resolution timer tick.
383 * Runs from hardirq context with interrupts disabled.
384 */
385static enum hrtimer_restart hrtick(struct hrtimer *timer)
386{
387	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
388
389	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
390
391	raw_spin_lock(&rq->lock);
392	update_rq_clock(rq);
393	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
394	raw_spin_unlock(&rq->lock);
395
396	return HRTIMER_NORESTART;
397}
398
399#ifdef CONFIG_SMP
400
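/* Re-arm the per-rq hrtick timer at its previously programmed expiry time. */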
401static int __hrtick_restart(struct rq *rq)
402{
403	struct hrtimer *timer = &rq->hrtick_timer;
404	ktime_t time = hrtimer_get_softexpires(timer);
405
406	return __hrtimer_start_range_ns(timer, time, 0, HRTIMER_MODE_ABS_PINNED, 0);
407}
408
409/*
410 * called from hardirq (IPI) context
411 */
412static void __hrtick_start(void *arg)
413{
414	struct rq *rq = arg;
415
416	raw_spin_lock(&rq->lock);
417	__hrtick_restart(rq);
418	rq->hrtick_csd_pending = 0;
419	raw_spin_unlock(&rq->lock);
420}
421
422/*
423 * Called to set the hrtick timer state.
424 *
425 * called with rq->lock held and irqs disabled
426 */
427void hrtick_start(struct rq *rq, u64 delay)
428{
429	struct hrtimer *timer = &rq->hrtick_timer;
430	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
431
432	hrtimer_set_expires(timer, time);
433
434	if (rq == this_rq()) {
435		__hrtick_restart(rq);
436	} else if (!rq->hrtick_csd_pending) {
437		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
438		rq->hrtick_csd_pending = 1;
439	}
440}
441
442static int
443hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
444{
445	int cpu = (int)(long)hcpu;
446
447	switch (action) {
448	case CPU_UP_CANCELED:
449	case CPU_UP_CANCELED_FROZEN:
450	case CPU_DOWN_PREPARE:
451	case CPU_DOWN_PREPARE_FROZEN:
452	case CPU_DEAD:
453	case CPU_DEAD_FROZEN:
454		hrtick_clear(cpu_rq(cpu));
455		return NOTIFY_OK;
456	}
457
458	return NOTIFY_DONE;
459}
460
461static __init void init_hrtick(void)
462{
463	hotcpu_notifier(hotplug_hrtick, 0);
464}
465#else
466/*
467 * Called to set the hrtick timer state.
468 *
469 * called with rq->lock held and irqs disabled
470 */
471void hrtick_start(struct rq *rq, u64 delay)
472{
473	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
474			HRTIMER_MODE_REL_PINNED, 0);
475}
476
477static inline void init_hrtick(void)
478{
479}
480#endif /* CONFIG_SMP */
481
482static void init_rq_hrtick(struct rq *rq)
483{
484#ifdef CONFIG_SMP
485	rq->hrtick_csd_pending = 0;
486
487	rq->hrtick_csd.flags = 0;
488	rq->hrtick_csd.func = __hrtick_start;
489	rq->hrtick_csd.info = rq;
490#endif
491
492	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
493	rq->hrtick_timer.function = hrtick;
494}
495#else	/* CONFIG_SCHED_HRTICK */
496static inline void hrtick_clear(struct rq *rq)
497{
498}
499
500static inline void init_rq_hrtick(struct rq *rq)
501{
502}
503
504static inline void init_hrtick(void)
505{
506}
507#endif	/* CONFIG_SCHED_HRTICK */
508
509/*
510 * resched_task - mark a task 'to be rescheduled now'.
511 *
512 * On UP this means the setting of the need_resched flag, on SMP it
513 * might also involve a cross-CPU call to trigger the scheduler on
514 * the target CPU.
515 */
516void resched_task(struct task_struct *p)
517{
518	int cpu;
519
520	lockdep_assert_held(&task_rq(p)->lock);
521
522	if (test_tsk_need_resched(p))
523		return;
524
525	set_tsk_need_resched(p);
526
527	cpu = task_cpu(p);
528	if (cpu == smp_processor_id()) {
529		set_preempt_need_resched();
530		return;
531	}
532
533	/* NEED_RESCHED must be visible before we test polling */
534	smp_mb();
535	if (!tsk_is_polling(p))
536		smp_send_reschedule(cpu);
537}
538
539void resched_cpu(int cpu)
540{
541	struct rq *rq = cpu_rq(cpu);
542	unsigned long flags;
543
544	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
545		return;
546	resched_task(cpu_curr(cpu));
547	raw_spin_unlock_irqrestore(&rq->lock, flags);
548}
549
550#ifdef CONFIG_SMP
551#ifdef CONFIG_NO_HZ_COMMON
552/*
553 * In the semi idle case, use the nearest busy cpu for migrating timers
554 * from an idle cpu.  This is good for power-savings.
555 *
556 * We don't do a similar optimization for a completely idle system, as
557 * selecting an idle cpu will add more delays to the timers than intended
558 * (as that cpu's timer base may not be up to date wrt jiffies etc).
559 */
560int get_nohz_timer_target(void)
561{
562	int cpu = smp_processor_id();
563	int i;
564	struct sched_domain *sd;
565
566	rcu_read_lock();
567	for_each_domain(cpu, sd) {
568		for_each_cpu(i, sched_domain_span(sd)) {
569			if (!idle_cpu(i)) {
570				cpu = i;
571				goto unlock;
572			}
573		}
574	}
575unlock:
576	rcu_read_unlock();
577	return cpu;
578}
579/*
580 * When add_timer_on() enqueues a timer into the timer wheel of an
581 * idle CPU then this timer might expire before the next timer event
582 * which is scheduled to wake up that CPU. In case of a completely
583 * idle system the next event might even be infinite time into the
584 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
585 * leaves the inner idle loop so the newly added timer is taken into
586 * account when the CPU goes back to idle and evaluates the timer
587 * wheel for the next timer event.
588 */
589static void wake_up_idle_cpu(int cpu)
590{
591	struct rq *rq = cpu_rq(cpu);
592
593	if (cpu == smp_processor_id())
594		return;
595
596	/*
597	 * This is safe, as this function is called with the timer
598	 * wheel base lock of (cpu) held. When the CPU is on the way
599	 * to idle and has not yet set rq->curr to idle then it will
600	 * be serialized on the timer wheel base lock and take the new
601	 * timer into account automatically.
602	 */
603	if (rq->curr != rq->idle)
604		return;
605
606	/*
607	 * We can set TIF_RESCHED on the idle task of the other CPU
608	 * locklessly. The worst case is that the other CPU runs the
609	 * idle task through an additional NOOP schedule().
610	 */
611	set_tsk_need_resched(rq->idle);
612
613	/* NEED_RESCHED must be visible before we test polling */
614	smp_mb();
615	if (!tsk_is_polling(rq->idle))
616		smp_send_reschedule(cpu);
617}
618
619static bool wake_up_full_nohz_cpu(int cpu)
620{
621	if (tick_nohz_full_cpu(cpu)) {
622		if (cpu != smp_processor_id() ||
623		    tick_nohz_tick_stopped())
624			smp_send_reschedule(cpu);
625		return true;
626	}
627
628	return false;
629}
630
631void wake_up_nohz_cpu(int cpu)
632{
633	if (!wake_up_full_nohz_cpu(cpu))
634		wake_up_idle_cpu(cpu);
635}
636
637static inline bool got_nohz_idle_kick(void)
638{
639	int cpu = smp_processor_id();
640
641	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
642		return false;
643
644	if (idle_cpu(cpu) && !need_resched())
645		return true;
646
647	/*
648	 * We can't run the Idle Load Balance on this CPU at this time, so we
649	 * cancel it and clear NOHZ_BALANCE_KICK.
650	 */
651	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
652	return false;
653}
654
655#else /* CONFIG_NO_HZ_COMMON */
656
657static inline bool got_nohz_idle_kick(void)
658{
659	return false;
660}
661
662#endif /* CONFIG_NO_HZ_COMMON */
663
664#ifdef CONFIG_NO_HZ_FULL
665bool sched_can_stop_tick(void)
666{
667	struct rq *rq;
668
669	rq = this_rq();
670
671	/* Make sure rq->nr_running update is visible after the IPI */
672	smp_rmb();
673
674	/* More than one running task needs preemption */
675	if (rq->nr_running > 1)
676		return false;
677
678	return true;
679}
680#endif /* CONFIG_NO_HZ_FULL */
681
682void sched_avg_update(struct rq *rq)
683{
684	s64 period = sched_avg_period();
685
686	while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
687		/*
688		 * Inline assembly required to prevent the compiler
689		 * optimising this loop into a divmod call.
690		 * See __iter_div_u64_rem() for another example of this.
691		 */
692		asm("" : "+rm" (rq->age_stamp));
693		rq->age_stamp += period;
694		rq->rt_avg /= 2;
695	}
696}
697
698#endif /* CONFIG_SMP */
699
700#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
701			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
702/*
703 * Iterate task_group tree rooted at *from, calling @down when first entering a
704 * node and @up when leaving it for the final time.
705 *
706 * Caller must hold rcu_lock or sufficient equivalent.
707 */
708int walk_tg_tree_from(struct task_group *from,
709			     tg_visitor down, tg_visitor up, void *data)
710{
711	struct task_group *parent, *child;
712	int ret;
713
714	parent = from;
715
716down:
717	ret = (*down)(parent, data);
718	if (ret)
719		goto out;
720	list_for_each_entry_rcu(child, &parent->children, siblings) {
721		parent = child;
722		goto down;
723
724up:
725		continue;
726	}
727	ret = (*up)(parent, data);
728	if (ret || parent == from)
729		goto out;
730
731	child = parent;
732	parent = parent->parent;
733	if (parent)
734		goto up;
735out:
736	return ret;
737}
738
739int tg_nop(struct task_group *tg, void *data)
740{
741	return 0;
742}
743#endif
744
745static void set_load_weight(struct task_struct *p)
746{
747	int prio = p->static_prio - MAX_RT_PRIO;
748	struct load_weight *load = &p->se.load;
749
750	/*
751	 * SCHED_IDLE tasks get minimal weight:
752	 */
753	if (p->policy == SCHED_IDLE) {
754		load->weight = scale_load(WEIGHT_IDLEPRIO);
755		load->inv_weight = WMULT_IDLEPRIO;
756		return;
757	}
758
759	load->weight = scale_load(prio_to_weight[prio]);
760	load->inv_weight = prio_to_wmult[prio];
761}
762
763static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
764{
765	update_rq_clock(rq);
766	sched_info_queued(rq, p);
767	p->sched_class->enqueue_task(rq, p, flags);
768}
769
770static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
771{
772	update_rq_clock(rq);
773	sched_info_dequeued(rq, p);
774	p->sched_class->dequeue_task(rq, p, flags);
775}
776
777void activate_task(struct rq *rq, struct task_struct *p, int flags)
778{
779	if (task_contributes_to_load(p))
780		rq->nr_uninterruptible--;
781
782	enqueue_task(rq, p, flags);
783}
784
785void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
786{
787	if (task_contributes_to_load(p))
788		rq->nr_uninterruptible++;
789
790	dequeue_task(rq, p, flags);
791}
792
793static void update_rq_clock_task(struct rq *rq, s64 delta)
794{
795/*
796 * In theory, the compiler should just see 0 here, and optimize out the call
797 * to sched_rt_avg_update. But I don't trust it...
798 */
799#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
800	s64 steal = 0, irq_delta = 0;
801#endif
802#ifdef CONFIG_IRQ_TIME_ACCOUNTING
803	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
804
805	/*
806	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
807	 * this case when a previous update_rq_clock() happened inside a
808	 * {soft,}irq region.
809	 *
810	 * When this happens, we stop ->clock_task and only update the
811	 * prev_irq_time stamp to account for the part that fit, so that a next
812	 * update will consume the rest. This ensures ->clock_task is
813	 * monotonic.
814	 *
815	 * It does however cause some slight misattribution of {soft,}irq
816	 * time; a more accurate solution would be to update the irq_time using
817	 * the current rq->clock timestamp, except that would require using
818	 * atomic ops.
819	 */
820	if (irq_delta > delta)
821		irq_delta = delta;
822
823	rq->prev_irq_time += irq_delta;
824	delta -= irq_delta;
825#endif
826#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
827	if (static_key_false((&paravirt_steal_rq_enabled))) {
828		u64 st;
829
830		steal = paravirt_steal_clock(cpu_of(rq));
831		steal -= rq->prev_steal_time_rq;
832
833		if (unlikely(steal > delta))
834			steal = delta;
835
836		st = steal_ticks(steal);
837		steal = st * TICK_NSEC;
838
839		rq->prev_steal_time_rq += steal;
840
841		delta -= steal;
842	}
843#endif
844
845	rq->clock_task += delta;
846
847#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
848	if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
849		sched_rt_avg_update(rq, irq_delta + steal);
850#endif
851}
852
853void sched_set_stop_task(int cpu, struct task_struct *stop)
854{
855	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
856	struct task_struct *old_stop = cpu_rq(cpu)->stop;
857
858	if (stop) {
859		/*
860		 * Make it appear like a SCHED_FIFO task; it's something
861		 * userspace knows about and won't get confused about.
862		 *
863		 * Also, it will make PI more or less work without too
864		 * much confusion -- but then, stop work should not
865		 * rely on PI working anyway.
866		 */
867		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
868
869		stop->sched_class = &stop_sched_class;
870	}
871
872	cpu_rq(cpu)->stop = stop;
873
874	if (old_stop) {
875		/*
876		 * Reset it back to a normal scheduling class so that
877		 * it can die in pieces.
878		 */
879		old_stop->sched_class = &rt_sched_class;
880	}
881}
882
883/*
884 * __normal_prio - return the priority that is based on the static prio
885 */
886static inline int __normal_prio(struct task_struct *p)
887{
888	return p->static_prio;
889}
890
891/*
892 * Calculate the expected normal priority: i.e. priority
893 * without taking RT-inheritance into account. Might be
894 * boosted by interactivity modifiers. Changes upon fork,
895 * setprio syscalls, and whenever the interactivity
896 * estimator recalculates.
897 */
898static inline int normal_prio(struct task_struct *p)
899{
900	int prio;
901
902	if (task_has_rt_policy(p))
903		prio = MAX_RT_PRIO-1 - p->rt_priority;
904	else
905		prio = __normal_prio(p);
906	return prio;
907}
908
909/*
910 * Calculate the current priority, i.e. the priority
911 * taken into account by the scheduler. This value might
912 * be boosted by RT tasks, or might be boosted by
913 * interactivity modifiers. Will be RT if the task got
914 * RT-boosted. If not then it returns p->normal_prio.
915 */
916static int effective_prio(struct task_struct *p)
917{
918	p->normal_prio = normal_prio(p);
919	/*
920	 * If we are RT tasks or we were boosted to RT priority,
921	 * keep the priority unchanged. Otherwise, update priority
922	 * to the normal priority:
923	 */
924	if (!rt_prio(p->prio))
925		return p->normal_prio;
926	return p->prio;
927}
928
929/**
930 * task_curr - is this task currently executing on a CPU?
931 * @p: the task in question.
932 *
933 * Return: 1 if the task is currently executing. 0 otherwise.
934 */
935inline int task_curr(const struct task_struct *p)
936{
937	return cpu_curr(task_cpu(p)) == p;
938}
939
940static inline void check_class_changed(struct rq *rq, struct task_struct *p,
941				       const struct sched_class *prev_class,
942				       int oldprio)
943{
944	if (prev_class != p->sched_class) {
945		if (prev_class->switched_from)
946			prev_class->switched_from(rq, p);
947		p->sched_class->switched_to(rq, p);
948	} else if (oldprio != p->prio)
949		p->sched_class->prio_changed(rq, p, oldprio);
950}
951
952void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
953{
954	const struct sched_class *class;
955
956	if (p->sched_class == rq->curr->sched_class) {
957		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
958	} else {
959		for_each_class(class) {
960			if (class == rq->curr->sched_class)
961				break;
962			if (class == p->sched_class) {
963				resched_task(rq->curr);
964				break;
965			}
966		}
967	}
968
969	/*
970	 * A queue event has occurred, and we're going to schedule.  In
971	 * this case, we can save a useless back-to-back clock update.
972	 */
973	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
974		rq->skip_clock_update = 1;
975}
976
977#ifdef CONFIG_SMP
978void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
979{
980#ifdef CONFIG_SCHED_DEBUG
981	/*
982	 * We should never call set_task_cpu() on a blocked task,
983	 * ttwu() will sort out the placement.
984	 */
985	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
986			!(task_preempt_count(p) & PREEMPT_ACTIVE));
987
988#ifdef CONFIG_LOCKDEP
989	/*
990	 * The caller should hold either p->pi_lock or rq->lock, when changing
991	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
992	 *
993	 * sched_move_task() holds both and thus holding either pins the cgroup,
994	 * see task_group().
995	 *
996	 * Furthermore, all task_rq users should acquire both locks, see
997	 * task_rq_lock().
998	 */
999	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
1000				      lockdep_is_held(&task_rq(p)->lock)));
1001#endif
1002#endif
1003
1004	trace_sched_migrate_task(p, new_cpu);
1005
1006	if (task_cpu(p) != new_cpu) {
1007		if (p->sched_class->migrate_task_rq)
1008			p->sched_class->migrate_task_rq(p, new_cpu);
1009		p->se.nr_migrations++;
1010		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
1011	}
1012
1013	__set_task_cpu(p, new_cpu);
1014}
1015
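/*
 * Move @p to @cpu as one half of a task swap. If @p is not currently on a
 * runqueue, only record the target in p->wake_cpu so the next wakeup places
 * it there.
 */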
1016static void __migrate_swap_task(struct task_struct *p, int cpu)
1017{
1018	if (p->on_rq) {
1019		struct rq *src_rq, *dst_rq;
1020
1021		src_rq = task_rq(p);
1022		dst_rq = cpu_rq(cpu);
1023
1024		deactivate_task(src_rq, p, 0);
1025		set_task_cpu(p, cpu);
1026		activate_task(dst_rq, p, 0);
1027		check_preempt_curr(dst_rq, p, 0);
1028	} else {
1029		/*
1030		 * Task isn't running anymore; make it appear like we migrated
1031		 * it before it went to sleep. This means on wakeup we make the
1032		 * previous cpu our target instead of where it really is.
1033		 */
1034		p->wake_cpu = cpu;
1035	}
1036}
1037
1038struct migration_swap_arg {
1039	struct task_struct *src_task, *dst_task;
1040	int src_cpu, dst_cpu;
1041};
1042
1043static int migrate_swap_stop(void *data)
1044{
1045	struct migration_swap_arg *arg = data;
1046	struct rq *src_rq, *dst_rq;
1047	int ret = -EAGAIN;
1048
1049	src_rq = cpu_rq(arg->src_cpu);
1050	dst_rq = cpu_rq(arg->dst_cpu);
1051
1052	double_raw_lock(&arg->src_task->pi_lock,
1053			&arg->dst_task->pi_lock);
1054	double_rq_lock(src_rq, dst_rq);
1055	if (task_cpu(arg->dst_task) != arg->dst_cpu)
1056		goto unlock;
1057
1058	if (task_cpu(arg->src_task) != arg->src_cpu)
1059		goto unlock;
1060
1061	if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
1062		goto unlock;
1063
1064	if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
1065		goto unlock;
1066
1067	__migrate_swap_task(arg->src_task, arg->dst_cpu);
1068	__migrate_swap_task(arg->dst_task, arg->src_cpu);
1069
1070	ret = 0;
1071
1072unlock:
1073	double_rq_unlock(src_rq, dst_rq);
1074	raw_spin_unlock(&arg->dst_task->pi_lock);
1075	raw_spin_unlock(&arg->src_task->pi_lock);
1076
1077	return ret;
1078}
1079
1080/*
1081 * Cross migrate two tasks
1082 */
1083int migrate_swap(struct task_struct *cur, struct task_struct *p)
1084{
1085	struct migration_swap_arg arg;
1086	int ret = -EINVAL;
1087
1088	arg = (struct migration_swap_arg){
1089		.src_task = cur,
1090		.src_cpu = task_cpu(cur),
1091		.dst_task = p,
1092		.dst_cpu = task_cpu(p),
1093	};
1094
1095	if (arg.src_cpu == arg.dst_cpu)
1096		goto out;
1097
1098	/*
1099	 * These three tests are all lockless; this is OK since all of them
1100	 * will be re-checked with proper locks held further down the line.
1101	 */
1102	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
1103		goto out;
1104
1105	if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
1106		goto out;
1107
1108	if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
1109		goto out;
1110
1111	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
1112
1113out:
1114	return ret;
1115}
1116
1117struct migration_arg {
1118	struct task_struct *task;
1119	int dest_cpu;
1120};
1121
1122static int migration_cpu_stop(void *data);
1123
1124/*
1125 * wait_task_inactive - wait for a thread to unschedule.
1126 *
1127 * If @match_state is nonzero, it's the @p->state value just checked and
1128 * not expected to change.  If it changes, i.e. @p might have woken up,
1129 * then return zero.  When we succeed in waiting for @p to be off its CPU,
1130 * we return a positive number (its total switch count).  If a second call
1131 * a short while later returns the same number, the caller can be sure that
1132 * @p has remained unscheduled the whole time.
1133 *
1134 * The caller must ensure that the task *will* unschedule sometime soon,
1135 * else this function might spin for a *long* time. This function can't
1136 * be called with interrupts off, or it may introduce deadlock with
1137 * smp_call_function() if an IPI is sent by the same process we are
1138 * waiting to become inactive.
1139 */
1140unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1141{
1142	unsigned long flags;
1143	int running, on_rq;
1144	unsigned long ncsw;
1145	struct rq *rq;
1146
1147	for (;;) {
1148		/*
1149		 * We do the initial early heuristics without holding
1150		 * any task-queue locks at all. We'll only try to get
1151		 * the runqueue lock when things look like they will
1152		 * work out!
1153		 */
1154		rq = task_rq(p);
1155
1156		/*
1157		 * If the task is actively running on another CPU
1158		 * still, just relax and busy-wait without holding
1159		 * any locks.
1160		 *
1161		 * NOTE! Since we don't hold any locks, it's not
1162		 * even sure that "rq" stays as the right runqueue!
1163		 * But we don't care, since "task_running()" will
1164		 * return false if the runqueue has changed and p
1165		 * is actually now running somewhere else!
1166		 */
1167		while (task_running(rq, p)) {
1168			if (match_state && unlikely(p->state != match_state))
1169				return 0;
1170			cpu_relax();
1171		}
1172
1173		/*
1174		 * Ok, time to look more closely! We need the rq
1175		 * lock now, to be *sure*. If we're wrong, we'll
1176		 * just go back and repeat.
1177		 */
1178		rq = task_rq_lock(p, &flags);
1179		trace_sched_wait_task(p);
1180		running = task_running(rq, p);
1181		on_rq = p->on_rq;
1182		ncsw = 0;
1183		if (!match_state || p->state == match_state)
1184			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
1185		task_rq_unlock(rq, p, &flags);
1186
1187		/*
1188		 * If it changed from the expected state, bail out now.
1189		 */
1190		if (unlikely(!ncsw))
1191			break;
1192
1193		/*
1194		 * Was it really running after all now that we
1195		 * checked with the proper locks actually held?
1196		 *
1197		 * Oops. Go back and try again..
1198		 */
1199		if (unlikely(running)) {
1200			cpu_relax();
1201			continue;
1202		}
1203
1204		/*
1205		 * It's not enough that it's not actively running,
1206		 * it must be off the runqueue _entirely_, and not
1207		 * preempted!
1208		 *
1209		 * So if it was still runnable (but just not actively
1210		 * running right now), it's preempted, and we should
1211		 * yield - it could be a while.
1212		 */
1213		if (unlikely(on_rq)) {
1214			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
1215
1216			set_current_state(TASK_UNINTERRUPTIBLE);
1217			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
1218			continue;
1219		}
1220
1221		/*
1222		 * Ahh, all good. It wasn't running, and it wasn't
1223		 * runnable, which means that it will never become
1224		 * running in the future either. We're all done!
1225		 */
1226		break;
1227	}
1228
1229	return ncsw;
1230}
1231
1232/**
1233 * kick_process - kick a running thread to enter/exit the kernel
1234 * @p: the to-be-kicked thread
1235 *
1236 * Cause a process which is running on another CPU to enter
1237 * kernel-mode, without any delay. (to get signals handled.)
1238 *
1239 * NOTE: this function doesn't have to take the runqueue lock,
1240 * because all it wants to ensure is that the remote task enters
1241 * the kernel. If the IPI races and the task has been migrated
1242 * to another CPU then no harm is done and the purpose has been
1243 * achieved as well.
1244 */
1245void kick_process(struct task_struct *p)
1246{
1247	int cpu;
1248
1249	preempt_disable();
1250	cpu = task_cpu(p);
1251	if ((cpu != smp_processor_id()) && task_curr(p))
1252		smp_send_reschedule(cpu);
1253	preempt_enable();
1254}
1255EXPORT_SYMBOL_GPL(kick_process);
1256#endif /* CONFIG_SMP */
1257
1258#ifdef CONFIG_SMP
1259/*
1260 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
1261 */
1262static int select_fallback_rq(int cpu, struct task_struct *p)
1263{
1264	int nid = cpu_to_node(cpu);
1265	const struct cpumask *nodemask = NULL;
1266	enum { cpuset, possible, fail } state = cpuset;
1267	int dest_cpu;
1268
1269	/*
1270	 * If the node that the cpu is on has been offlined, cpu_to_node()
1271	 * will return -1. There is no cpu on the node, and we should
1272	 * select a cpu on another node.
1273	 */
1274	if (nid != -1) {
1275		nodemask = cpumask_of_node(nid);
1276
1277		/* Look for allowed, online CPU in same node. */
1278		for_each_cpu(dest_cpu, nodemask) {
1279			if (!cpu_online(dest_cpu))
1280				continue;
1281			if (!cpu_active(dest_cpu))
1282				continue;
1283			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
1284				return dest_cpu;
1285		}
1286	}
1287
1288	for (;;) {
1289		/* Any allowed, online CPU? */
1290		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
1291			if (!cpu_online(dest_cpu))
1292				continue;
1293			if (!cpu_active(dest_cpu))
1294				continue;
1295			goto out;
1296		}
1297
1298		switch (state) {
1299		case cpuset:
1300			/* No more Mr. Nice Guy. */
1301			cpuset_cpus_allowed_fallback(p);
1302			state = possible;
1303			break;
1304
1305		case possible:
1306			do_set_cpus_allowed(p, cpu_possible_mask);
1307			state = fail;
1308			break;
1309
1310		case fail:
1311			BUG();
1312			break;
1313		}
1314	}
1315
1316out:
1317	if (state != cpuset) {
1318		/*
1319		 * Don't tell them about moving exiting tasks or
1320		 * kernel threads (both mm NULL), since they never
1321		 * leave the kernel.
1322		 */
1323		if (p->mm && printk_ratelimit()) {
1324			printk_sched("process %d (%s) no longer affine to cpu%d\n",
1325					task_pid_nr(p), p->comm, cpu);
1326		}
1327	}
1328
1329	return dest_cpu;
1330}
1331
1332/*
1333 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
1334 */
1335static inline
1336int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
1337{
1338	cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
1339
1340	/*
1341	 * In order not to call set_task_cpu() on a blocking task we need
1342	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
1343	 * cpu.
1344	 *
1345	 * Since this is common to all placement strategies, this lives here.
1346	 *
1347	 * [ this allows ->select_task_rq() to simply return task_cpu(p) and
1348	 *   not worry about this generic constraint ]
1349	 */
1350	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
1351		     !cpu_online(cpu)))
1352		cpu = select_fallback_rq(task_cpu(p), p);
1353
1354	return cpu;
1355}
1356
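/* Simple exponential moving average: avg += (sample - avg) / 8 */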
1357static void update_avg(u64 *avg, u64 sample)
1358{
1359	s64 diff = sample - *avg;
1360	*avg += diff >> 3;
1361}
1362#endif
1363
1364static void
1365ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
1366{
1367#ifdef CONFIG_SCHEDSTATS
1368	struct rq *rq = this_rq();
1369
1370#ifdef CONFIG_SMP
1371	int this_cpu = smp_processor_id();
1372
1373	if (cpu == this_cpu) {
1374		schedstat_inc(rq, ttwu_local);
1375		schedstat_inc(p, se.statistics.nr_wakeups_local);
1376	} else {
1377		struct sched_domain *sd;
1378
1379		schedstat_inc(p, se.statistics.nr_wakeups_remote);
1380		rcu_read_lock();
1381		for_each_domain(this_cpu, sd) {
1382			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
1383				schedstat_inc(sd, ttwu_wake_remote);
1384				break;
1385			}
1386		}
1387		rcu_read_unlock();
1388	}
1389
1390	if (wake_flags & WF_MIGRATED)
1391		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
1392
1393#endif /* CONFIG_SMP */
1394
1395	schedstat_inc(rq, ttwu_count);
1396	schedstat_inc(p, se.statistics.nr_wakeups);
1397
1398	if (wake_flags & WF_SYNC)
1399		schedstat_inc(p, se.statistics.nr_wakeups_sync);
1400
1401#endif /* CONFIG_SCHEDSTATS */
1402}
1403
1404static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
1405{
1406	activate_task(rq, p, en_flags);
1407	p->on_rq = 1;
1408
1409	/* if a worker is waking up, notify workqueue */
1410	if (p->flags & PF_WQ_WORKER)
1411		wq_worker_waking_up(p, cpu_of(rq));
1412}
1413
1414/*
1415 * Mark the task runnable and perform wakeup-preemption.
1416 */
1417static void
1418ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
1419{
1420	check_preempt_curr(rq, p, wake_flags);
1421	trace_sched_wakeup(p, true);
1422
1423	p->state = TASK_RUNNING;
1424#ifdef CONFIG_SMP
1425	if (p->sched_class->task_woken)
1426		p->sched_class->task_woken(rq, p);
1427
1428	if (rq->idle_stamp) {
1429		u64 delta = rq_clock(rq) - rq->idle_stamp;
1430		u64 max = 2*rq->max_idle_balance_cost;
1431
1432		update_avg(&rq->avg_idle, delta);
1433
1434		if (rq->avg_idle > max)
1435			rq->avg_idle = max;
1436
1437		rq->idle_stamp = 0;
1438	}
1439#endif
1440}
1441
1442static void
1443ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
1444{
1445#ifdef CONFIG_SMP
1446	if (p->sched_contributes_to_load)
1447		rq->nr_uninterruptible--;
1448#endif
1449
1450	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
1451	ttwu_do_wakeup(rq, p, wake_flags);
1452}
1453
1454/*
1455 * Called in case the task @p isn't fully descheduled from its runqueue;
1456 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
1457 * as all we need to do is flip p->state to TASK_RUNNING, since
1458 * the task is still ->on_rq.
1459 */
1460static int ttwu_remote(struct task_struct *p, int wake_flags)
1461{
1462	struct rq *rq;
1463	int ret = 0;
1464
1465	rq = __task_rq_lock(p);
1466	if (p->on_rq) {
1467		/* check_preempt_curr() may use rq clock */
1468		update_rq_clock(rq);
1469		ttwu_do_wakeup(rq, p, wake_flags);
1470		ret = 1;
1471	}
1472	__task_rq_unlock(rq);
1473
1474	return ret;
1475}
1476
1477#ifdef CONFIG_SMP
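/*
 * Drain this CPU's wake_list and activate the tasks that remote CPUs have
 * queued for wakeup here.
 */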
1478static void sched_ttwu_pending(void)
1479{
1480	struct rq *rq = this_rq();
1481	struct llist_node *llist = llist_del_all(&rq->wake_list);
1482	struct task_struct *p;
1483
1484	raw_spin_lock(&rq->lock);
1485
1486	while (llist) {
1487		p = llist_entry(llist, struct task_struct, wake_entry);
1488		llist = llist_next(llist);
1489		ttwu_do_activate(rq, p, 0);
1490	}
1491
1492	raw_spin_unlock(&rq->lock);
1493}
1494
1495void scheduler_ipi(void)
1496{
1497	/*
1498	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
1499	 * TIF_NEED_RESCHED remotely (for the first time) will also send
1500	 * this IPI.
1501	 */
1502	if (tif_need_resched())
1503		set_preempt_need_resched();
1504
1505	if (llist_empty(&this_rq()->wake_list)
1506			&& !tick_nohz_full_cpu(smp_processor_id())
1507			&& !got_nohz_idle_kick())
1508		return;
1509
1510	/*
1511	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
1512	 * traditionally all their work was done from the interrupt return
1513	 * path. Now that we actually do some work, we need to make sure
1514	 * we do call them.
1515	 *
1516	 * Some archs already do call them; luckily irq_enter/exit nest
1517	 * properly.
1518	 *
1519	 * Arguably we should visit all archs and update all handlers,
1520	 * however a fair share of IPIs are still resched-only, so this would
1521	 * somewhat pessimize the simple resched case.
1522	 */
1523	irq_enter();
1524	tick_nohz_full_check();
1525	sched_ttwu_pending();
1526
1527	/*
1528	 * Check if someone kicked us for doing the nohz idle load balance.
1529	 */
1530	if (unlikely(got_nohz_idle_kick())) {
1531		this_rq()->idle_balance = 1;
1532		raise_softirq_irqoff(SCHED_SOFTIRQ);
1533	}
1534	irq_exit();
1535}
1536
1537static void ttwu_queue_remote(struct task_struct *p, int cpu)
1538{
1539	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
1540		smp_send_reschedule(cpu);
1541}
1542
1543bool cpus_share_cache(int this_cpu, int that_cpu)
1544{
1545	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
1546}
1547#endif /* CONFIG_SMP */
1548
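/*
 * Queue @p for wakeup on @cpu: via an IPI and the remote wake_list when the
 * CPUs do not share a cache (and TTWU_QUEUE is enabled), otherwise directly
 * under the runqueue lock.
 */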
1549static void ttwu_queue(struct task_struct *p, int cpu)
1550{
1551	struct rq *rq = cpu_rq(cpu);
1552
1553#if defined(CONFIG_SMP)
1554	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
1555		sched_clock_cpu(cpu); /* sync clocks x-cpu */
1556		ttwu_queue_remote(p, cpu);
1557		return;
1558	}
1559#endif
1560
1561	raw_spin_lock(&rq->lock);
1562	ttwu_do_activate(rq, p, 0);
1563	raw_spin_unlock(&rq->lock);
1564}
1565
1566/**
1567 * try_to_wake_up - wake up a thread
1568 * @p: the thread to be awakened
1569 * @state: the mask of task states that can be woken
1570 * @wake_flags: wake modifier flags (WF_*)
1571 *
1572 * Put it on the run-queue if it's not already there. The "current"
1573 * thread is always on the run-queue (except when the actual
1574 * re-schedule is in progress), and as such you're allowed to do
1575 * the simpler "current->state = TASK_RUNNING" to mark yourself
1576 * runnable without the overhead of this.
1577 *
1578 * Return: %true if @p was woken up, %false if it was already running
1579 * or @state didn't match @p's state.
1580 */
1581static int
1582try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1583{
1584	unsigned long flags;
1585	int cpu, success = 0;
1586
1587	/*
1588	 * If we are going to wake up a thread waiting for CONDITION we
1589	 * need to ensure that CONDITION=1 done by the caller cannot be
1590	 * reordered with the p->state check below. This pairs with the mb() in
1591	 * set_current_state() that the waiting thread does.
1592	 */
1593	smp_mb__before_spinlock();
1594	raw_spin_lock_irqsave(&p->pi_lock, flags);
1595	if (!(p->state & state))
1596		goto out;
1597
1598	success = 1; /* we're going to change ->state */
1599	cpu = task_cpu(p);
1600
1601	if (p->on_rq && ttwu_remote(p, wake_flags))
1602		goto stat;
1603
1604#ifdef CONFIG_SMP
1605	/*
1606	 * If the owning (remote) cpu is still in the middle of schedule() with
1607	 * this task as prev, wait until it's done referencing the task.
1608	 */
1609	while (p->on_cpu)
1610		cpu_relax();
1611	/*
1612	 * Pairs with the smp_wmb() in finish_lock_switch().
1613	 */
1614	smp_rmb();
1615
1616	p->sched_contributes_to_load = !!task_contributes_to_load(p);
1617	p->state = TASK_WAKING;
1618
1619	if (p->sched_class->task_waking)
1620		p->sched_class->task_waking(p);
1621
1622	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
1623	if (task_cpu(p) != cpu) {
1624		wake_flags |= WF_MIGRATED;
1625		set_task_cpu(p, cpu);
1626	}
1627#endif /* CONFIG_SMP */
1628
1629	ttwu_queue(p, cpu);
1630stat:
1631	ttwu_stat(p, cpu, wake_flags);
1632out:
1633	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1634
1635	return success;
1636}
1637
1638/**
1639 * try_to_wake_up_local - try to wake up a local task with rq lock held
1640 * @p: the thread to be awakened
1641 *
1642 * Put @p on the run-queue if it's not already there. The caller must
1643 * ensure that this_rq() is locked, @p is bound to this_rq() and not
1644 * the current task.
1645 */
1646static void try_to_wake_up_local(struct task_struct *p)
1647{
1648	struct rq *rq = task_rq(p);
1649
1650	if (WARN_ON_ONCE(rq != this_rq()) ||
1651	    WARN_ON_ONCE(p == current))
1652		return;
1653
1654	lockdep_assert_held(&rq->lock);
1655
1656	if (!raw_spin_trylock(&p->pi_lock)) {
1657		raw_spin_unlock(&rq->lock);
1658		raw_spin_lock(&p->pi_lock);
1659		raw_spin_lock(&rq->lock);
1660	}
1661
1662	if (!(p->state & TASK_NORMAL))
1663		goto out;
1664
1665	if (!p->on_rq)
1666		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
1667
1668	ttwu_do_wakeup(rq, p, 0);
1669	ttwu_stat(p, smp_processor_id(), 0);
1670out:
1671	raw_spin_unlock(&p->pi_lock);
1672}
1673
1674/**
1675 * wake_up_process - Wake up a specific process
1676 * @p: The process to be woken up.
1677 *
1678 * Attempt to wake up the nominated process and move it to the set of runnable
1679 * processes.
1680 *
1681 * Return: 1 if the process was woken up, 0 if it was already running.
1682 *
1683 * It may be assumed that this function implies a write memory barrier before
1684 * changing the task state if and only if any tasks are woken up.
1685 */
1686int wake_up_process(struct task_struct *p)
1687{
1688	WARN_ON(task_is_stopped_or_traced(p));
1689	return try_to_wake_up(p, TASK_NORMAL, 0);
1690}
1691EXPORT_SYMBOL(wake_up_process);
1692
1693int wake_up_state(struct task_struct *p, unsigned int state)
1694{
1695	return try_to_wake_up(p, state, 0);
1696}
1697
1698/*
1699 * Perform scheduler-related setup for a newly forked process p.
1700 * p is forked by current.
1701 *
1702 * __sched_fork() is basic setup used by init_idle() too:
1703 */
1704static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
1705{
1706	p->on_rq			= 0;
1707
1708	p->se.on_rq			= 0;
1709	p->se.exec_start		= 0;
1710	p->se.sum_exec_runtime		= 0;
1711	p->se.prev_sum_exec_runtime	= 0;
1712	p->se.nr_migrations		= 0;
1713	p->se.vruntime			= 0;
1714	INIT_LIST_HEAD(&p->se.group_node);
1715
1716#ifdef CONFIG_SCHEDSTATS
1717	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
1718#endif
1719
1720	INIT_LIST_HEAD(&p->rt.run_list);
1721
1722#ifdef CONFIG_PREEMPT_NOTIFIERS
1723	INIT_HLIST_HEAD(&p->preempt_notifiers);
1724#endif
1725
1726#ifdef CONFIG_NUMA_BALANCING
1727	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
1728		p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
1729		p->mm->numa_scan_seq = 0;
1730	}
1731
1732	if (clone_flags & CLONE_VM)
1733		p->numa_preferred_nid = current->numa_preferred_nid;
1734	else
1735		p->numa_preferred_nid = -1;
1736
1737	p->node_stamp = 0ULL;
1738	p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
1739	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
1740	p->numa_work.next = &p->numa_work;
1741	p->numa_faults = NULL;
1742	p->numa_faults_buffer = NULL;
1743
1744	INIT_LIST_HEAD(&p->numa_entry);
1745	p->numa_group = NULL;
1746#endif /* CONFIG_NUMA_BALANCING */
1747}
1748
1749#ifdef CONFIG_NUMA_BALANCING
1750#ifdef CONFIG_SCHED_DEBUG
1751void set_numabalancing_state(bool enabled)
1752{
1753	if (enabled)
1754		sched_feat_set("NUMA");
1755	else
1756		sched_feat_set("NO_NUMA");
1757}
1758#else
1759__read_mostly bool numabalancing_enabled;
1760
1761void set_numabalancing_state(bool enabled)
1762{
1763	numabalancing_enabled = enabled;
1764}
1765#endif /* CONFIG_SCHED_DEBUG */
1766#endif /* CONFIG_NUMA_BALANCING */
1767
1768/*
1769 * fork()/clone()-time setup:
1770 */
1771void sched_fork(unsigned long clone_flags, struct task_struct *p)
1772{
1773	unsigned long flags;
1774	int cpu = get_cpu();
1775
1776	__sched_fork(clone_flags, p);
1777	/*
1778	 * We mark the process as running here. This guarantees that
1779	 * nobody will actually run it, and a signal or other external
1780	 * event cannot wake it up and insert it on the runqueue either.
1781	 */
1782	p->state = TASK_RUNNING;
1783
1784	/*
1785	 * Make sure we do not leak PI boosting priority to the child.
1786	 */
1787	p->prio = current->normal_prio;
1788
1789	/*
1790	 * Revert to default priority/policy on fork if requested.
1791	 */
1792	if (unlikely(p->sched_reset_on_fork)) {
1793		if (task_has_rt_policy(p)) {
1794			p->policy = SCHED_NORMAL;
1795			p->static_prio = NICE_TO_PRIO(0);
1796			p->rt_priority = 0;
1797		} else if (PRIO_TO_NICE(p->static_prio) < 0)
1798			p->static_prio = NICE_TO_PRIO(0);
1799
1800		p->prio = p->normal_prio = __normal_prio(p);
1801		set_load_weight(p);
1802
1803		/*
1804		 * We don't need the reset flag anymore after the fork. It has
1805		 * fulfilled its duty:
1806		 */
1807		p->sched_reset_on_fork = 0;
1808	}
1809
1810	if (!rt_prio(p->prio))
1811		p->sched_class = &fair_sched_class;
1812
1813	if (p->sched_class->task_fork)
1814		p->sched_class->task_fork(p);
1815
1816	/*
1817	 * The child is not yet in the pid-hash so no cgroup attach races,
1818	 * and the cgroup is pinned to this child because cgroup_fork()
1819	 * is run before sched_fork().
1820	 *
1821	 * Silence PROVE_RCU.
1822	 */
1823	raw_spin_lock_irqsave(&p->pi_lock, flags);
1824	set_task_cpu(p, cpu);
1825	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1826
1827#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1828	if (likely(sched_info_on()))
1829		memset(&p->sched_info, 0, sizeof(p->sched_info));
1830#endif
1831#if defined(CONFIG_SMP)
1832	p->on_cpu = 0;
1833#endif
1834	init_task_preempt_count(p);
1835#ifdef CONFIG_SMP
1836	plist_node_init(&p->pushable_tasks, MAX_PRIO);
1837#endif
1838
1839	put_cpu();
1840}
1841
1842/*
1843 * wake_up_new_task - wake up a newly created task for the first time.
1844 *
1845 * This function will do some initial scheduler statistics housekeeping
1846 * that must be done for every newly created context, then puts the task
1847 * on the runqueue and wakes it.
1848 */
1849void wake_up_new_task(struct task_struct *p)
1850{
1851	unsigned long flags;
1852	struct rq *rq;
1853
1854	raw_spin_lock_irqsave(&p->pi_lock, flags);
1855#ifdef CONFIG_SMP
1856	/*
1857	 * Fork balancing, do it here and not earlier because:
1858	 *  - cpus_allowed can change in the fork path
1859	 *  - any previously selected cpu might disappear through hotplug
1860	 */
1861	set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
1862#endif
1863
1864	/* Initialize new task's runnable average */
1865	init_task_runnable_average(p);
1866	rq = __task_rq_lock(p);
1867	activate_task(rq, p, 0);
1868	p->on_rq = 1;
1869	trace_sched_wakeup_new(p, true);
1870	check_preempt_curr(rq, p, WF_FORK);
1871#ifdef CONFIG_SMP
1872	if (p->sched_class->task_woken)
1873		p->sched_class->task_woken(rq, p);
1874#endif
1875	task_rq_unlock(rq, p, &flags);
1876}
1877
1878#ifdef CONFIG_PREEMPT_NOTIFIERS
1879
1880/**
1881 * preempt_notifier_register - tell me when current is being preempted & rescheduled
1882 * @notifier: notifier struct to register
1883 */
1884void preempt_notifier_register(struct preempt_notifier *notifier)
1885{
1886	hlist_add_head(&notifier->link, &current->preempt_notifiers);
1887}
1888EXPORT_SYMBOL_GPL(preempt_notifier_register);
1889
1890/**
1891 * preempt_notifier_unregister - no longer interested in preemption notifications
1892 * @notifier: notifier struct to unregister
1893 *
1894 * This is safe to call from within a preemption notifier.
1895 */
1896void preempt_notifier_unregister(struct preempt_notifier *notifier)
1897{
1898	hlist_del(&notifier->link);
1899}
1900EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
1901
1902static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1903{
1904	struct preempt_notifier *notifier;
1905
1906	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
1907		notifier->ops->sched_in(notifier, raw_smp_processor_id());
1908}
1909
1910static void
1911fire_sched_out_preempt_notifiers(struct task_struct *curr,
1912				 struct task_struct *next)
1913{
1914	struct preempt_notifier *notifier;
1915
1916	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
1917		notifier->ops->sched_out(notifier, next);
1918}
1919
1920#else /* !CONFIG_PREEMPT_NOTIFIERS */
1921
1922static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1923{
1924}
1925
1926static void
1927fire_sched_out_preempt_notifiers(struct task_struct *curr,
1928				 struct task_struct *next)
1929{
1930}
1931
1932#endif /* CONFIG_PREEMPT_NOTIFIERS */
1933
1934/**
1935 * prepare_task_switch - prepare to switch tasks
1936 * @rq: the runqueue preparing to switch
1937 * @prev: the current task that is being switched out
1938 * @next: the task we are going to switch to.
1939 *
1940 * This is called with the rq lock held and interrupts off. It must
1941 * be paired with a subsequent finish_task_switch after the context
1942 * switch.
1943 *
1944 * prepare_task_switch sets up locking and calls architecture specific
1945 * hooks.
1946 */
1947static inline void
1948prepare_task_switch(struct rq *rq, struct task_struct *prev,
1949		    struct task_struct *next)
1950{
1951	trace_sched_switch(prev, next);
1952	sched_info_switch(rq, prev, next);
1953	perf_event_task_sched_out(prev, next);
1954	fire_sched_out_preempt_notifiers(prev, next);
1955	prepare_lock_switch(rq, next);
1956	prepare_arch_switch(next);
1957}
1958
1959/**
1960 * finish_task_switch - clean up after a task-switch
1961 * @rq: runqueue associated with task-switch
1962 * @prev: the thread we just switched away from.
1963 *
1964 * finish_task_switch must be called after the context switch, paired
1965 * with a prepare_task_switch call before the context switch.
1966 * finish_task_switch will reconcile locking set up by prepare_task_switch,
1967 * and do any other architecture-specific cleanup actions.
1968 *
1969 * Note that we may have delayed dropping an mm in context_switch(). If
1970 * so, we finish that here outside of the runqueue lock. (Doing it
1971 * with the lock held can cause deadlocks; see schedule() for
1972 * details.)
1973 */
1974static void finish_task_switch(struct rq *rq, struct task_struct *prev)
1975	__releases(rq->lock)
1976{
1977	struct mm_struct *mm = rq->prev_mm;
1978	long prev_state;
1979
1980	rq->prev_mm = NULL;
1981
1982	/*
1983	 * A task struct has one reference for the use as "current".
1984	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
1985	 * schedule one last time. The schedule call will never return, and
1986	 * the scheduled task must drop that reference.
1987	 * The test for TASK_DEAD must occur while the runqueue locks are
1988	 * still held, otherwise prev could be scheduled on another cpu, die
1989	 * there before we look at prev->state, and then the reference would
1990	 * be dropped twice.
1991	 *		Manfred Spraul <manfred@colorfullife.com>
1992	 */
1993	prev_state = prev->state;
1994	vtime_task_switch(prev);
1995	finish_arch_switch(prev);
1996	perf_event_task_sched_in(prev, current);
1997	finish_lock_switch(rq, prev);
1998	finish_arch_post_lock_switch();
1999
2000	fire_sched_in_preempt_notifiers(current);
2001	if (mm)
2002		mmdrop(mm);
2003	if (unlikely(prev_state == TASK_DEAD)) {
2004		task_numa_free(prev);
2005
2006		/*
2007		 * Remove function-return probe instances associated with this
2008		 * task and put them back on the free list.
2009		 */
2010		kprobe_flush_task(prev);
2011		put_task_struct(prev);
2012	}
2013
2014	tick_nohz_task_switch(current);
2015}
2016
2017#ifdef CONFIG_SMP
2018
2019/* assumes rq->lock is held */
2020static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
2021{
2022	if (prev->sched_class->pre_schedule)
2023		prev->sched_class->pre_schedule(rq, prev);
2024}
2025
2026/* rq->lock is NOT held, but preemption is disabled */
2027static inline void post_schedule(struct rq *rq)
2028{
2029	if (rq->post_schedule) {
2030		unsigned long flags;
2031
2032		raw_spin_lock_irqsave(&rq->lock, flags);
2033		if (rq->curr->sched_class->post_schedule)
2034			rq->curr->sched_class->post_schedule(rq);
2035		raw_spin_unlock_irqrestore(&rq->lock, flags);
2036
2037		rq->post_schedule = 0;
2038	}
2039}
2040
2041#else
2042
2043static inline void pre_schedule(struct rq *rq, struct task_struct *p)
2044{
2045}
2046
2047static inline void post_schedule(struct rq *rq)
2048{
2049}
2050
2051#endif
2052
2053/**
2054 * schedule_tail - first thing a freshly forked thread must call.
2055 * @prev: the thread we just switched away from.
2056 */
2057asmlinkage void schedule_tail(struct task_struct *prev)
2058	__releases(rq->lock)
2059{
2060	struct rq *rq = this_rq();
2061
2062	finish_task_switch(rq, prev);
2063
2064	/*
2065	 * FIXME: do we need to worry about rq being invalidated by the
2066	 * task_switch?
2067	 */
2068	post_schedule(rq);
2069
2070#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2071	/* In this case, finish_task_switch does not reenable preemption */
2072	preempt_enable();
2073#endif
2074	if (current->set_child_tid)
2075		put_user(task_pid_vnr(current), current->set_child_tid);
2076}
2077
2078/*
2079 * context_switch - switch to the new MM and the new
2080 * thread's register state.
2081 */
2082static inline void
2083context_switch(struct rq *rq, struct task_struct *prev,
2084	       struct task_struct *next)
2085{
2086	struct mm_struct *mm, *oldmm;
2087
2088	prepare_task_switch(rq, prev, next);
2089
2090	mm = next->mm;
2091	oldmm = prev->active_mm;
2092	/*
2093	 * For paravirt, this is coupled with an exit in switch_to to
2094	 * combine the page table reload and the switch backend into
2095	 * one hypercall.
2096	 */
2097	arch_start_context_switch(prev);
2098
2099	if (!mm) {
2100		next->active_mm = oldmm;
2101		atomic_inc(&oldmm->mm_count);
2102		enter_lazy_tlb(oldmm, next);
2103	} else
2104		switch_mm(oldmm, mm, next);
2105
2106	if (!prev->mm) {
2107		prev->active_mm = NULL;
2108		rq->prev_mm = oldmm;
2109	}
2110	/*
2111	 * Since the runqueue lock will be released by the next
2112	 * The runqueue lock will be released by the next
2113	 * task (which is normally an invalid locking op, but in the
2114	 * case of the scheduler it's an obvious special-case), so
2115	 * we do an early lockdep release here:
2116#ifndef __ARCH_WANT_UNLOCKED_CTXSW
2117	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2118#endif
2119
2120	context_tracking_task_switch(prev, next);
2121	/* Here we just switch the register state and the stack. */
2122	switch_to(prev, next, prev);
2123
2124	barrier();
2125	/*
2126	 * this_rq must be evaluated again because prev may have moved
2127	 * CPUs since it called schedule(), thus the 'rq' on its stack
2128	 * frame will be invalid.
2129	 */
2130	finish_task_switch(this_rq(), prev);
2131}
2132
2133/*
2134 * nr_running and nr_context_switches:
2135 *
2136 * externally visible scheduler statistics: current number of runnable
2137 * threads, total number of context switches performed since bootup.
2138 */
2139unsigned long nr_running(void)
2140{
2141	unsigned long i, sum = 0;
2142
2143	for_each_online_cpu(i)
2144		sum += cpu_rq(i)->nr_running;
2145
2146	return sum;
2147}
2148
2149unsigned long long nr_context_switches(void)
2150{
2151	int i;
2152	unsigned long long sum = 0;
2153
2154	for_each_possible_cpu(i)
2155		sum += cpu_rq(i)->nr_switches;
2156
2157	return sum;
2158}
2159
2160unsigned long nr_iowait(void)
2161{
2162	unsigned long i, sum = 0;
2163
2164	for_each_possible_cpu(i)
2165		sum += atomic_read(&cpu_rq(i)->nr_iowait);
2166
2167	return sum;
2168}
2169
2170unsigned long nr_iowait_cpu(int cpu)
2171{
2172	struct rq *this = cpu_rq(cpu);
2173	return atomic_read(&this->nr_iowait);
2174}
2175
2176#ifdef CONFIG_SMP
2177
2178/*
2179 * sched_exec - execve() is a valuable balancing opportunity, because at
2180 * this point the task has the smallest effective memory and cache footprint.
2181 */
2182void sched_exec(void)
2183{
2184	struct task_struct *p = current;
2185	unsigned long flags;
2186	int dest_cpu;
2187
2188	raw_spin_lock_irqsave(&p->pi_lock, flags);
2189	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
2190	if (dest_cpu == smp_processor_id())
2191		goto unlock;
2192
2193	if (likely(cpu_active(dest_cpu))) {
2194		struct migration_arg arg = { p, dest_cpu };
2195
2196		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2197		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
2198		return;
2199	}
2200unlock:
2201	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2202}
2203
2204#endif
2205
2206DEFINE_PER_CPU(struct kernel_stat, kstat);
2207DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
2208
2209EXPORT_PER_CPU_SYMBOL(kstat);
2210EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
2211
2212/*
2213 * Return any ns on the sched_clock that have not yet been accounted in
2214 * @p, if that task is currently running.
2215 *
2216 * Called with task_rq_lock() held on @rq.
2217 */
2218static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
2219{
2220	u64 ns = 0;
2221
2222	if (task_current(rq, p)) {
2223		update_rq_clock(rq);
2224		ns = rq_clock_task(rq) - p->se.exec_start;
2225		if ((s64)ns < 0)
2226			ns = 0;
2227	}
2228
2229	return ns;
2230}
2231
2232unsigned long long task_delta_exec(struct task_struct *p)
2233{
2234	unsigned long flags;
2235	struct rq *rq;
2236	u64 ns = 0;
2237
2238	rq = task_rq_lock(p, &flags);
2239	ns = do_task_delta_exec(p, rq);
2240	task_rq_unlock(rq, p, &flags);
2241
2242	return ns;
2243}
2244
2245/*
2246 * Return accounted runtime for the task.
2247 * In case the task is currently running, return the runtime plus current's
2248 * pending runtime that has not been accounted yet.
2249 */
2250unsigned long long task_sched_runtime(struct task_struct *p)
2251{
2252	unsigned long flags;
2253	struct rq *rq;
2254	u64 ns = 0;
2255
2256	rq = task_rq_lock(p, &flags);
2257	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
2258	task_rq_unlock(rq, p, &flags);
2259
2260	return ns;
2261}
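
/*
 * Illustrative use (editor's sketch, not part of the original source):
 * sampling task_sched_runtime() twice brackets a task's CPU consumption
 * over an interval; msleep() here merely stands in for the sampling
 * period:
 *
 *	u64 t0 = task_sched_runtime(p);
 *
 *	msleep(1000);
 *	pr_info("%s used %llu ns\n", p->comm, task_sched_runtime(p) - t0);
 */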
2262
2263/*
2264 * This function gets called by the timer code, with HZ frequency.
2265 * We call it with interrupts disabled.
2266 */
2267void scheduler_tick(void)
2268{
2269	int cpu = smp_processor_id();
2270	struct rq *rq = cpu_rq(cpu);
2271	struct task_struct *curr = rq->curr;
2272
2273	sched_clock_tick();
2274
2275	raw_spin_lock(&rq->lock);
2276	update_rq_clock(rq);
2277	curr->sched_class->task_tick(rq, curr, 0);
2278	update_cpu_load_active(rq);
2279	raw_spin_unlock(&rq->lock);
2280
2281	perf_event_task_tick();
2282
2283#ifdef CONFIG_SMP
2284	rq->idle_balance = idle_cpu(cpu);
2285	trigger_load_balance(rq, cpu);
2286#endif
2287	rq_last_tick_reset(rq);
2288}
2289
2290#ifdef CONFIG_NO_HZ_FULL
2291/**
2292 * scheduler_tick_max_deferment
2293 *
2294 * Keep at least one tick per second when a single
2295 * active task is running because the scheduler doesn't
2296 * yet completely support a full dynticks environment.
2297 *
2298 * This makes sure that uptime, CFS vruntime, load
2299 * balancing, etc... continue to move forward, even
2300 * with a very low granularity.
2301 *
2302 * Return: Maximum deferment in nanoseconds.
2303 */
2304u64 scheduler_tick_max_deferment(void)
2305{
2306	struct rq *rq = this_rq();
2307	unsigned long next, now = ACCESS_ONCE(jiffies);
2308
2309	next = rq->last_sched_tick + HZ;
2310
2311	if (time_before_eq(next, now))
2312		return 0;
2313
2314	return jiffies_to_usecs(next - now) * NSEC_PER_USEC;
2315}
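
/*
 * Worked example (editor's illustration): with HZ=1000, if the last
 * tick on this runqueue happened 250 jiffies ago, then next = last + HZ
 * lies 750 jiffies in the future, so the tick may be deferred by
 * jiffies_to_usecs(750) * NSEC_PER_USEC = 750,000,000 ns, i.e. 750ms.
 */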
2316#endif
2317
2318notrace unsigned long get_parent_ip(unsigned long addr)
2319{
2320	if (in_lock_functions(addr)) {
2321		addr = CALLER_ADDR2;
2322		if (in_lock_functions(addr))
2323			addr = CALLER_ADDR3;
2324	}
2325	return addr;
2326}
2327
2328#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2329				defined(CONFIG_PREEMPT_TRACER))
2330
2331void __kprobes preempt_count_add(int val)
2332{
2333#ifdef CONFIG_DEBUG_PREEMPT
2334	/*
2335	 * Underflow?
2336	 */
2337	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
2338		return;
2339#endif
2340	__preempt_count_add(val);
2341#ifdef CONFIG_DEBUG_PREEMPT
2342	/*
2343	 * Spinlock count overflowing soon?
2344	 */
2345	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
2346				PREEMPT_MASK - 10);
2347#endif
2348	if (preempt_count() == val)
2349		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
2350}
2351EXPORT_SYMBOL(preempt_count_add);
2352
2353void __kprobes preempt_count_sub(int val)
2354{
2355#ifdef CONFIG_DEBUG_PREEMPT
2356	/*
2357	 * Underflow?
2358	 */
2359	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
2360		return;
2361	/*
2362	 * Is the spinlock portion underflowing?
2363	 */
2364	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
2365			!(preempt_count() & PREEMPT_MASK)))
2366		return;
2367#endif
2368
2369	if (preempt_count() == val)
2370		trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
2371	__preempt_count_sub(val);
2372}
2373EXPORT_SYMBOL(preempt_count_sub);
2374
2375#endif
2376
2377/*
2378 * Print scheduling while atomic bug:
2379 */
2380static noinline void __schedule_bug(struct task_struct *prev)
2381{
2382	if (oops_in_progress)
2383		return;
2384
2385	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
2386		prev->comm, prev->pid, preempt_count());
2387
2388	debug_show_held_locks(prev);
2389	print_modules();
2390	if (irqs_disabled())
2391		print_irqtrace_events(prev);
2392	dump_stack();
2393	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
2394}
2395
2396/*
2397 * Various schedule()-time debugging checks and statistics:
2398 */
2399static inline void schedule_debug(struct task_struct *prev)
2400{
2401	/*
2402	 * Test if we are atomic. Since do_exit() needs to call into
2403	 * schedule() atomically, we ignore that path for now.
2404	 * Otherwise, whine if we are scheduling when we should not be.
2405	 */
2406	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
2407		__schedule_bug(prev);
2408	rcu_sleep_check();
2409
2410	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
2411
2412	schedstat_inc(this_rq(), sched_count);
2413}
2414
2415static void put_prev_task(struct rq *rq, struct task_struct *prev)
2416{
2417	if (prev->on_rq || rq->skip_clock_update < 0)
2418		update_rq_clock(rq);
2419	prev->sched_class->put_prev_task(rq, prev);
2420}
2421
2422/*
2423 * Pick up the highest-prio task:
2424 */
2425static inline struct task_struct *
2426pick_next_task(struct rq *rq)
2427{
2428	const struct sched_class *class;
2429	struct task_struct *p;
2430
2431	/*
2432	 * Optimization: we know that if all tasks are in
2433	 * the fair class we can call that function directly:
2434	 */
2435	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
2436		p = fair_sched_class.pick_next_task(rq);
2437		if (likely(p))
2438			return p;
2439	}
2440
2441	for_each_class(class) {
2442		p = class->pick_next_task(rq);
2443		if (p)
2444			return p;
2445	}
2446
2447	BUG(); /* the idle class will always have a runnable task */
2448}
2449
2450/*
2451 * __schedule() is the main scheduler function.
2452 *
2453 * The main means of driving the scheduler and thus entering this function are:
2454 *
2455 *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
2456 *
2457 *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
2458 *      paths. For example, see arch/x86/entry_64.S.
2459 *
2460 *      To drive preemption between tasks, the scheduler sets the flag in timer
2461 *      interrupt handler scheduler_tick().
2462 *
2463 *   3. Wakeups don't really cause entry into schedule(). They add a
2464 *      task to the run-queue and that's it.
2465 *
2466 *      Now, if the new task added to the run-queue preempts the current
2467 *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
2468 *      called on the nearest possible occasion:
2469 *
2470 *       - If the kernel is preemptible (CONFIG_PREEMPT=y):
2471 *
2472 *         - in syscall or exception context, at the next outmost
2473 *           preempt_enable(). (this might be as soon as the wake_up()'s
2474 *           spin_unlock()!)
2475 *
2476 *         - in IRQ context, return from interrupt-handler to
2477 *           preemptible context
2478 *
2479 *       - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
2480 *         then at the next:
2481 *
2482 *          - cond_resched() call
2483 *          - explicit schedule() call
2484 *          - return from syscall or exception to user-space
2485 *          - return from interrupt-handler to user-space
2486 */
2487static void __sched __schedule(void)
2488{
2489	struct task_struct *prev, *next;
2490	unsigned long *switch_count;
2491	struct rq *rq;
2492	int cpu;
2493
2494need_resched:
2495	preempt_disable();
2496	cpu = smp_processor_id();
2497	rq = cpu_rq(cpu);
2498	rcu_note_context_switch(cpu);
2499	prev = rq->curr;
2500
2501	schedule_debug(prev);
2502
2503	if (sched_feat(HRTICK))
2504		hrtick_clear(rq);
2505
2506	/*
2507	 * Make sure that signal_pending_state()->signal_pending() below
2508	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
2509	 * done by the caller to avoid the race with signal_wake_up().
2510	 */
2511	smp_mb__before_spinlock();
2512	raw_spin_lock_irq(&rq->lock);
2513
2514	switch_count = &prev->nivcsw;
2515	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
2516		if (unlikely(signal_pending_state(prev->state, prev))) {
2517			prev->state = TASK_RUNNING;
2518		} else {
2519			deactivate_task(rq, prev, DEQUEUE_SLEEP);
2520			prev->on_rq = 0;
2521
2522			/*
2523			 * If a worker went to sleep, notify and ask workqueue
2524			 * whether it wants to wake up a task to maintain
2525			 * concurrency.
2526			 */
2527			if (prev->flags & PF_WQ_WORKER) {
2528				struct task_struct *to_wakeup;
2529
2530				to_wakeup = wq_worker_sleeping(prev, cpu);
2531				if (to_wakeup)
2532					try_to_wake_up_local(to_wakeup);
2533			}
2534		}
2535		switch_count = &prev->nvcsw;
2536	}
2537
2538	pre_schedule(rq, prev);
2539
2540	if (unlikely(!rq->nr_running))
2541		idle_balance(cpu, rq);
2542
2543	put_prev_task(rq, prev);
2544	next = pick_next_task(rq);
2545	clear_tsk_need_resched(prev);
2546	clear_preempt_need_resched();
2547	rq->skip_clock_update = 0;
2548
2549	if (likely(prev != next)) {
2550		rq->nr_switches++;
2551		rq->curr = next;
2552		++*switch_count;
2553
2554		context_switch(rq, prev, next); /* unlocks the rq */
2555		/*
2556		 * The context switch have flipped the stack from under us
2557		 * and restored the local variables which were saved when
2558		 * this task called schedule() in the past. prev == current
2559		 * is still correct, but it can be moved to another cpu/rq.
2560		 */
2561		cpu = smp_processor_id();
2562		rq = cpu_rq(cpu);
2563	} else
2564		raw_spin_unlock_irq(&rq->lock);
2565
2566	post_schedule(rq);
2567
2568	sched_preempt_enable_no_resched();
2569	if (need_resched())
2570		goto need_resched;
2571}
2572
2573static inline void sched_submit_work(struct task_struct *tsk)
2574{
2575	if (!tsk->state || tsk_is_pi_blocked(tsk))
2576		return;
2577	/*
2578	 * If we are going to sleep and we have plugged IO queued,
2579	 * make sure to submit it to avoid deadlocks.
2580	 */
2581	if (blk_needs_flush_plug(tsk))
2582		blk_schedule_flush_plug(tsk);
2583}
2584
2585asmlinkage void __sched schedule(void)
2586{
2587	struct task_struct *tsk = current;
2588
2589	sched_submit_work(tsk);
2590	__schedule();
2591}
2592EXPORT_SYMBOL(schedule);
2593
2594#ifdef CONFIG_CONTEXT_TRACKING
2595asmlinkage void __sched schedule_user(void)
2596{
2597	/*
2598	 * If we come here after a random call to set_need_resched(),
2599	 * or we have been woken up remotely but the IPI has not yet arrived,
2600	 * we haven't yet exited the RCU idle mode. Do it here manually until
2601	 * we find a better solution.
2602	 */
2603	user_exit();
2604	schedule();
2605	user_enter();
2606}
2607#endif
2608
2609/**
2610 * schedule_preempt_disabled - called with preemption disabled
2611 *
2612 * Returns with preemption disabled. Note: preempt_count must be 1
2613 */
2614void __sched schedule_preempt_disabled(void)
2615{
2616	sched_preempt_enable_no_resched();
2617	schedule();
2618	preempt_disable();
2619}
2620
2621#ifdef CONFIG_PREEMPT
2622/*
2623 * This is the entry point to schedule() from in-kernel preemption
2624 * off of preempt_enable(). Preemption off the return-from-interrupt
2625 * path is handled by preempt_schedule_irq() below.
2626 */
2627asmlinkage void __sched notrace preempt_schedule(void)
2628{
2629	/*
2630	 * If there is a non-zero preempt_count or interrupts are disabled,
2631	 * we do not want to preempt the current task. Just return.
2632	 */
2633	if (likely(!preemptible()))
2634		return;
2635
2636	do {
2637		__preempt_count_add(PREEMPT_ACTIVE);
2638		__schedule();
2639		__preempt_count_sub(PREEMPT_ACTIVE);
2640
2641		/*
2642		 * Check again in case we missed a preemption opportunity
2643		 * between schedule and now.
2644		 */
2645		barrier();
2646	} while (need_resched());
2647}
2648EXPORT_SYMBOL(preempt_schedule);
2649
2650/*
2651 * This is the entry point to schedule() from kernel preemption
2652 * off of irq context.
2653 * Note that this is called and returns with irqs disabled. This
2654 * protects us against recursive calls from irq context.
2655 */
2656asmlinkage void __sched preempt_schedule_irq(void)
2657{
2658	enum ctx_state prev_state;
2659
2660	/* Catch callers which need to be fixed */
2661	BUG_ON(preempt_count() || !irqs_disabled());
2662
2663	prev_state = exception_enter();
2664
2665	do {
2666		__preempt_count_add(PREEMPT_ACTIVE);
2667		local_irq_enable();
2668		__schedule();
2669		local_irq_disable();
2670		__preempt_count_sub(PREEMPT_ACTIVE);
2671
2672		/*
2673		 * Check again in case we missed a preemption opportunity
2674		 * between schedule and now.
2675		 */
2676		barrier();
2677	} while (need_resched());
2678
2679	exception_exit(prev_state);
2680}
2681
2682#endif /* CONFIG_PREEMPT */
2683
2684int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
2685			  void *key)
2686{
2687	return try_to_wake_up(curr->private, mode, wake_flags);
2688}
2689EXPORT_SYMBOL(default_wake_function);
2690
2691static long __sched
2692sleep_on_common(wait_queue_head_t *q, int state, long timeout)
2693{
2694	unsigned long flags;
2695	wait_queue_t wait;
2696
2697	init_waitqueue_entry(&wait, current);
2698
2699	__set_current_state(state);
2700
2701	spin_lock_irqsave(&q->lock, flags);
2702	__add_wait_queue(q, &wait);
2703	spin_unlock(&q->lock);
2704	timeout = schedule_timeout(timeout);
2705	spin_lock_irq(&q->lock);
2706	__remove_wait_queue(q, &wait);
2707	spin_unlock_irqrestore(&q->lock, flags);
2708
2709	return timeout;
2710}
2711
2712void __sched interruptible_sleep_on(wait_queue_head_t *q)
2713{
2714	sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2715}
2716EXPORT_SYMBOL(interruptible_sleep_on);
2717
2718long __sched
2719interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
2720{
2721	return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
2722}
2723EXPORT_SYMBOL(interruptible_sleep_on_timeout);
2724
2725void __sched sleep_on(wait_queue_head_t *q)
2726{
2727	sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2728}
2729EXPORT_SYMBOL(sleep_on);
2730
2731long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
2732{
2733	return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
2734}
2735EXPORT_SYMBOL(sleep_on_timeout);
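
/*
 * Editor's note: the sleep_on() family above is inherently racy -- the
 * wakeup condition can change between the caller checking it and the
 * task actually going to sleep. New code should use wait_event() and
 * friends instead, e.g. (illustrative only):
 *
 *	wait_event(wq, condition);
 *	wait_event_interruptible_timeout(wq, condition, timeout);
 */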
2736
2737#ifdef CONFIG_RT_MUTEXES
2738
2739/*
2740 * rt_mutex_setprio - set the current priority of a task
2741 * @p: task
2742 * @prio: prio value (kernel-internal form)
2743 *
2744 * This function changes the 'effective' priority of a task. It does
2745 * not touch ->normal_prio like __setscheduler().
2746 *
2747 * Used by the rt_mutex code to implement priority inheritance logic.
2748 */
2749void rt_mutex_setprio(struct task_struct *p, int prio)
2750{
2751	int oldprio, on_rq, running;
2752	struct rq *rq;
2753	const struct sched_class *prev_class;
2754
2755	BUG_ON(prio < 0 || prio > MAX_PRIO);
2756
2757	rq = __task_rq_lock(p);
2758
2759	/*
2760	 * Idle task boosting is a no-no in general. There is one
2761	 * exception, when PREEMPT_RT and NOHZ are active:
2762	 *
2763	 * The idle task calls get_next_timer_interrupt() and holds
2764	 * the timer wheel base->lock on the CPU and another CPU wants
2765	 * to access the timer (probably to cancel it). We can safely
2766	 * ignore the boosting request, as the idle CPU runs this code
2767	 * with interrupts disabled and will complete the lock
2768	 * protected section without being interrupted. So there is no
2769	 * real need to boost.
2770	 */
2771	if (unlikely(p == rq->idle)) {
2772		WARN_ON(p != rq->curr);
2773		WARN_ON(p->pi_blocked_on);
2774		goto out_unlock;
2775	}
2776
2777	trace_sched_pi_setprio(p, prio);
2778	oldprio = p->prio;
2779	prev_class = p->sched_class;
2780	on_rq = p->on_rq;
2781	running = task_current(rq, p);
2782	if (on_rq)
2783		dequeue_task(rq, p, 0);
2784	if (running)
2785		p->sched_class->put_prev_task(rq, p);
2786
2787	if (rt_prio(prio))
2788		p->sched_class = &rt_sched_class;
2789	else
2790		p->sched_class = &fair_sched_class;
2791
2792	p->prio = prio;
2793
2794	if (running)
2795		p->sched_class->set_curr_task(rq);
2796	if (on_rq)
2797		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
2798
2799	check_class_changed(rq, p, prev_class, oldprio);
2800out_unlock:
2801	__task_rq_unlock(rq);
2802}
2803#endif

2804void set_user_nice(struct task_struct *p, long nice)
2805{
2806	int old_prio, delta, on_rq;
2807	unsigned long flags;
2808	struct rq *rq;
2809
2810	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
2811		return;
2812	/*
2813	 * We have to be careful: if called from sys_setpriority(),
2814	 * the task might be in the middle of scheduling on another CPU.
2815	 */
2816	rq = task_rq_lock(p, &flags);
2817	/*
2818	 * The RT priorities are set via sched_setscheduler(), but we still
2819	 * allow the 'normal' nice value to be set - but as expected
2820	 * it won't have any effect on scheduling as long as the task
2821	 * remains SCHED_FIFO/SCHED_RR:
2822	 */
2823	if (task_has_rt_policy(p)) {
2824		p->static_prio = NICE_TO_PRIO(nice);
2825		goto out_unlock;
2826	}
2827	on_rq = p->on_rq;
2828	if (on_rq)
2829		dequeue_task(rq, p, 0);
2830
2831	p->static_prio = NICE_TO_PRIO(nice);
2832	set_load_weight(p);
2833	old_prio = p->prio;
2834	p->prio = effective_prio(p);
2835	delta = p->prio - old_prio;
2836
2837	if (on_rq) {
2838		enqueue_task(rq, p, 0);
2839		/*
2840		 * If the task increased its priority or is running and
2841		 * lowered its priority, then reschedule its CPU:
2842		 */
2843		if (delta < 0 || (delta > 0 && task_running(rq, p)))
2844			resched_task(rq->curr);
2845	}
2846out_unlock:
2847	task_rq_unlock(rq, p, &flags);
2848}
2849EXPORT_SYMBOL(set_user_nice);
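
/*
 * Illustrative use (editor's sketch): a kernel thread demoting itself
 * to the lowest priority so it mostly runs when nothing else wants to:
 *
 *	set_user_nice(current, 19);
 */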
2850
2851/*
2852 * can_nice - check if a task can reduce its nice value
2853 * @p: task
2854 * @nice: nice value
2855 */
2856int can_nice(const struct task_struct *p, const int nice)
2857{
2858	/* convert nice value [19,-20] to rlimit style value [1,40] */
2859	int nice_rlim = 20 - nice;
2860
2861	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
2862		capable(CAP_SYS_NICE));
2863}
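
/*
 * Worked example (editor's illustration): for nice = -10 the rlimit
 * style value is 20 - (-10) = 30, so the change is permitted iff
 * RLIMIT_NICE is at least 30 or the task has CAP_SYS_NICE.
 */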
2864
2865#ifdef __ARCH_WANT_SYS_NICE
2866
2867/*
2868 * sys_nice - change the priority of the current process.
2869 * @increment: priority increment
2870 *
2871 * sys_setpriority is a more generic, but much slower function that
2872 * does similar things.
2873 */
2874SYSCALL_DEFINE1(nice, int, increment)
2875{
2876	long nice, retval;
2877
2878	/*
2879	 * Setpriority might change our priority at the same moment.
2880	 * We don't have to worry. Conceptually one call occurs first
2881	 * and we have a single winner.
2882	 */
2883	if (increment < -40)
2884		increment = -40;
2885	if (increment > 40)
2886		increment = 40;
2887
2888	nice = TASK_NICE(current) + increment;
2889	if (nice < -20)
2890		nice = -20;
2891	if (nice > 19)
2892		nice = 19;
2893
2894	if (increment < 0 && !can_nice(current, nice))
2895		return -EPERM;
2896
2897	retval = security_task_setnice(current, nice);
2898	if (retval)
2899		return retval;
2900
2901	set_user_nice(current, nice);
2902	return 0;
2903}
2904
2905#endif
2906
2907/**
2908 * task_prio - return the priority value of a given task.
2909 * @p: the task in question.
2910 *
2911 * Return: The priority value as seen by users in /proc.
2912 * RT tasks are in the range [-100..-1] and normal tasks map
2913 * their nice value onto [0..39] (i.e. nice value plus 20).
2914 */
2915int task_prio(const struct task_struct *p)
2916{
2917	return p->prio - MAX_RT_PRIO;
2918}
2919
2920/**
2921 * task_nice - return the nice value of a given task.
2922 * @p: the task in question.
2923 *
2924 * Return: The nice value [ -20 ... 0 ... 19 ].
2925 */
2926int task_nice(const struct task_struct *p)
2927{
2928	return TASK_NICE(p);
2929}
2930EXPORT_SYMBOL(task_nice);
2931
2932/**
2933 * idle_cpu - is a given cpu idle currently?
2934 * @cpu: the processor in question.
2935 *
2936 * Return: 1 if the CPU is currently idle. 0 otherwise.
2937 */
2938int idle_cpu(int cpu)
2939{
2940	struct rq *rq = cpu_rq(cpu);
2941
2942	if (rq->curr != rq->idle)
2943		return 0;
2944
2945	if (rq->nr_running)
2946		return 0;
2947
2948#ifdef CONFIG_SMP
2949	if (!llist_empty(&rq->wake_list))
2950		return 0;
2951#endif
2952
2953	return 1;
2954}
2955
2956/**
2957 * idle_task - return the idle task for a given cpu.
2958 * @cpu: the processor in question.
2959 *
2960 * Return: The idle task for the cpu @cpu.
2961 */
2962struct task_struct *idle_task(int cpu)
2963{
2964	return cpu_rq(cpu)->idle;
2965}
2966
2967/**
2968 * find_process_by_pid - find a process with a matching PID value.
2969 * @pid: the pid in question.
2970 *
2971 * Return: The task of @pid, if found. %NULL otherwise.
2972 */
2973static struct task_struct *find_process_by_pid(pid_t pid)
2974{
2975	return pid ? find_task_by_vpid(pid) : current;
2976}
2977
2978/* Actually do priority change: must hold rq lock. */
2979static void
2980__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
2981{
2982	p->policy = policy;
2983	p->rt_priority = prio;
2984	p->normal_prio = normal_prio(p);
2985	/* we are holding p->pi_lock already */
2986	p->prio = rt_mutex_getprio(p);
2987	if (rt_prio(p->prio))
2988		p->sched_class = &rt_sched_class;
2989	else
2990		p->sched_class = &fair_sched_class;
2991	set_load_weight(p);
2992}
2993
2994/*
2995 * Check whether the target process has a UID that matches the current process's.
2996 */
2997static bool check_same_owner(struct task_struct *p)
2998{
2999	const struct cred *cred = current_cred(), *pcred;
3000	bool match;
3001
3002	rcu_read_lock();
3003	pcred = __task_cred(p);
3004	match = (uid_eq(cred->euid, pcred->euid) ||
3005		 uid_eq(cred->euid, pcred->uid));
3006	rcu_read_unlock();
3007	return match;
3008}
3009
3010static int __sched_setscheduler(struct task_struct *p, int policy,
3011				const struct sched_param *param, bool user)
3012{
3013	int retval, oldprio, oldpolicy = -1, on_rq, running;
3014	unsigned long flags;
3015	const struct sched_class *prev_class;
3016	struct rq *rq;
3017	int reset_on_fork;
3018
3019	/* may grab non-irq protected spin_locks */
3020	BUG_ON(in_interrupt());
3021recheck:
3022	/* double check policy once rq lock held */
3023	if (policy < 0) {
3024		reset_on_fork = p->sched_reset_on_fork;
3025		policy = oldpolicy = p->policy;
3026	} else {
3027		reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
3028		policy &= ~SCHED_RESET_ON_FORK;
3029
3030		if (policy != SCHED_FIFO && policy != SCHED_RR &&
3031				policy != SCHED_NORMAL && policy != SCHED_BATCH &&
3032				policy != SCHED_IDLE)
3033			return -EINVAL;
3034	}
3035
3036	/*
3037	 * Valid priorities for SCHED_FIFO and SCHED_RR are
3038	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
3039	 * SCHED_BATCH and SCHED_IDLE is 0.
3040	 */
3041	if (param->sched_priority < 0 ||
3042	    (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
3043	    (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
3044		return -EINVAL;
3045	if (rt_policy(policy) != (param->sched_priority != 0))
3046		return -EINVAL;
3047
3048	/*
3049	 * Allow unprivileged RT tasks to decrease priority:
3050	 */
3051	if (user && !capable(CAP_SYS_NICE)) {
3052		if (rt_policy(policy)) {
3053			unsigned long rlim_rtprio =
3054					task_rlimit(p, RLIMIT_RTPRIO);
3055
3056			/* can't set/change the rt policy */
3057			if (policy != p->policy && !rlim_rtprio)
3058				return -EPERM;
3059
3060			/* can't increase priority */
3061			if (param->sched_priority > p->rt_priority &&
3062			    param->sched_priority > rlim_rtprio)
3063				return -EPERM;
3064		}
3065
3066		/*
3067		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
3068		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
3069		 */
3070		if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
3071			if (!can_nice(p, TASK_NICE(p)))
3072				return -EPERM;
3073		}
3074
3075		/* can't change other user's priorities */
3076		if (!check_same_owner(p))
3077			return -EPERM;
3078
3079		/* Normal users shall not reset the sched_reset_on_fork flag */
3080		if (p->sched_reset_on_fork && !reset_on_fork)
3081			return -EPERM;
3082	}
3083
3084	if (user) {
3085		retval = security_task_setscheduler(p);
3086		if (retval)
3087			return retval;
3088	}
3089
3090	/*
3091	 * make sure no PI-waiters arrive (or leave) while we are
3092	 * changing the priority of the task:
3093	 *
3094	 * To be able to change p->policy safely, the appropriate
3095	 * runqueue lock must be held.
3096	 */
3097	rq = task_rq_lock(p, &flags);
3098
3099	/*
3100	 * Changing the policy of the stop threads is a very bad idea
3101	 */
3102	if (p == rq->stop) {
3103		task_rq_unlock(rq, p, &flags);
3104		return -EINVAL;
3105	}
3106
3107	/*
3108	 * If not changing anything there's no need to proceed further:
3109	 */
3110	if (unlikely(policy == p->policy && (!rt_policy(policy) ||
3111			param->sched_priority == p->rt_priority))) {
3112		task_rq_unlock(rq, p, &flags);
3113		return 0;
3114	}
3115
3116#ifdef CONFIG_RT_GROUP_SCHED
3117	if (user) {
3118		/*
3119		 * Do not allow realtime tasks into groups that have no runtime
3120		 * assigned.
3121		 */
3122		if (rt_bandwidth_enabled() && rt_policy(policy) &&
3123				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
3124				!task_group_is_autogroup(task_group(p))) {
3125			task_rq_unlock(rq, p, &flags);
3126			return -EPERM;
3127		}
3128	}
3129#endif
3130
3131	/* recheck policy now with rq lock held */
3132	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
3133		policy = oldpolicy = -1;
3134		task_rq_unlock(rq, p, &flags);
3135		goto recheck;
3136	}
3137	on_rq = p->on_rq;
3138	running = task_current(rq, p);
3139	if (on_rq)
3140		dequeue_task(rq, p, 0);
3141	if (running)
3142		p->sched_class->put_prev_task(rq, p);
3143
3144	p->sched_reset_on_fork = reset_on_fork;
3145
3146	oldprio = p->prio;
3147	prev_class = p->sched_class;
3148	__setscheduler(rq, p, policy, param->sched_priority);
3149
3150	if (running)
3151		p->sched_class->set_curr_task(rq);
3152	if (on_rq)
3153		enqueue_task(rq, p, 0);
3154
3155	check_class_changed(rq, p, prev_class, oldprio);
3156	task_rq_unlock(rq, p, &flags);
3157
3158	rt_mutex_adjust_pi(p);
3159
3160	return 0;
3161}
3162
3163/**
3164 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
3165 * @p: the task in question.
3166 * @policy: new policy.
3167 * @param: structure containing the new RT priority.
3168 *
3169 * Return: 0 on success. An error code otherwise.
3170 *
3171 * NOTE that the task may already be dead.
3172 */
3173int sched_setscheduler(struct task_struct *p, int policy,
3174		       const struct sched_param *param)
3175{
3176	return __sched_setscheduler(p, policy, param, true);
3177}
3178EXPORT_SYMBOL_GPL(sched_setscheduler);
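
/*
 * Illustrative use (editor's sketch): promoting a task to the highest
 * user-available FIFO priority. Kernel-internal callers whose context
 * may lack CAP_SYS_NICE use sched_setscheduler_nocheck() below instead:
 *
 *	struct sched_param sp = { .sched_priority = MAX_USER_RT_PRIO - 1 };
 *
 *	if (sched_setscheduler(p, SCHED_FIFO, &sp))
 *		pr_warn("%s: could not switch to SCHED_FIFO\n", p->comm);
 */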
3179
3180/**
3181 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
3182 * @p: the task in question.
3183 * @policy: new policy.
3184 * @param: structure containing the new RT priority.
3185 *
3186 * Just like sched_setscheduler, only don't bother checking if the
3187 * current context has permission.  For example, this is needed in
3188 * stop_machine(): we create temporary high priority worker threads,
3189 * but our caller might not have that capability.
3190 *
3191 * Return: 0 on success. An error code otherwise.
3192 */
3193int sched_setscheduler_nocheck(struct task_struct *p, int policy,
3194			       const struct sched_param *param)
3195{
3196	return __sched_setscheduler(p, policy, param, false);
3197}
3198
3199static int
3200do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
3201{
3202	struct sched_param lparam;
3203	struct task_struct *p;
3204	int retval;
3205
3206	if (!param || pid < 0)
3207		return -EINVAL;
3208	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
3209		return -EFAULT;
3210
3211	rcu_read_lock();
3212	retval = -ESRCH;
3213	p = find_process_by_pid(pid);
3214	if (p != NULL)
3215		retval = sched_setscheduler(p, policy, &lparam);
3216	rcu_read_unlock();
3217
3218	return retval;
3219}
3220
3221/**
3222 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
3223 * @pid: the pid in question.
3224 * @policy: new policy.
3225 * @param: structure containing the new RT priority.
3226 *
3227 * Return: 0 on success. An error code otherwise.
3228 */
3229SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
3230		struct sched_param __user *, param)
3231{
3232	/* negative values for policy are not valid */
3233	if (policy < 0)
3234		return -EINVAL;
3235
3236	return do_sched_setscheduler(pid, policy, param);
3237}
3238
3239/**
3240 * sys_sched_setparam - set/change the RT priority of a thread
3241 * @pid: the pid in question.
3242 * @param: structure containing the new RT priority.
3243 *
3244 * Return: 0 on success. An error code otherwise.
3245 */
3246SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
3247{
3248	return do_sched_setscheduler(pid, -1, param);
3249}
3250
3251/**
3252 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
3253 * @pid: the pid in question.
3254 *
3255 * Return: On success, the policy of the thread. Otherwise, a negative error
3256 * code.
3257 */
3258SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
3259{
3260	struct task_struct *p;
3261	int retval;
3262
3263	if (pid < 0)
3264		return -EINVAL;
3265
3266	retval = -ESRCH;
3267	rcu_read_lock();
3268	p = find_process_by_pid(pid);
3269	if (p) {
3270		retval = security_task_getscheduler(p);
3271		if (!retval)
3272			retval = p->policy
3273				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
3274	}
3275	rcu_read_unlock();
3276	return retval;
3277}
3278
3279/**
3280 * sys_sched_getparam - get the RT priority of a thread
3281 * @pid: the pid in question.
3282 * @param: structure containing the RT priority.
3283 *
3284 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
3285 * code.
3286 */
3287SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
3288{
3289	struct sched_param lp;
3290	struct task_struct *p;
3291	int retval;
3292
3293	if (!param || pid < 0)
3294		return -EINVAL;
3295
3296	rcu_read_lock();
3297	p = find_process_by_pid(pid);
3298	retval = -ESRCH;
3299	if (!p)
3300		goto out_unlock;
3301
3302	retval = security_task_getscheduler(p);
3303	if (retval)
3304		goto out_unlock;
3305
3306	lp.sched_priority = p->rt_priority;
3307	rcu_read_unlock();
3308
3309	/*
3310	 * This one might sleep; we cannot do it with a spinlock held ...
3311	 */
3312	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
3313
3314	return retval;
3315
3316out_unlock:
3317	rcu_read_unlock();
3318	return retval;
3319}
3320
3321long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
3322{
3323	cpumask_var_t cpus_allowed, new_mask;
3324	struct task_struct *p;
3325	int retval;
3326
3327	rcu_read_lock();
3328
3329	p = find_process_by_pid(pid);
3330	if (!p) {
3331		rcu_read_unlock();
3332		return -ESRCH;
3333	}
3334
3335	/* Prevent p going away */
3336	get_task_struct(p);
3337	rcu_read_unlock();
3338
3339	if (p->flags & PF_NO_SETAFFINITY) {
3340		retval = -EINVAL;
3341		goto out_put_task;
3342	}
3343	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
3344		retval = -ENOMEM;
3345		goto out_put_task;
3346	}
3347	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
3348		retval = -ENOMEM;
3349		goto out_free_cpus_allowed;
3350	}
3351	retval = -EPERM;
3352	if (!check_same_owner(p)) {
3353		rcu_read_lock();
3354		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
3355			rcu_read_unlock();
3356			goto out_unlock;
3357		}
3358		rcu_read_unlock();
3359	}
3360
3361	retval = security_task_setscheduler(p);
3362	if (retval)
3363		goto out_unlock;
3364
3365	cpuset_cpus_allowed(p, cpus_allowed);
3366	cpumask_and(new_mask, in_mask, cpus_allowed);
3367again:
3368	retval = set_cpus_allowed_ptr(p, new_mask);
3369
3370	if (!retval) {
3371		cpuset_cpus_allowed(p, cpus_allowed);
3372		if (!cpumask_subset(new_mask, cpus_allowed)) {
3373			/*
3374			 * We must have raced with a concurrent cpuset
3375			 * update. Just reset the cpus_allowed to the
3376			 * cpuset's cpus_allowed.
3377			 */
3378			cpumask_copy(new_mask, cpus_allowed);
3379			goto again;
3380		}
3381	}
3382out_unlock:
3383	free_cpumask_var(new_mask);
3384out_free_cpus_allowed:
3385	free_cpumask_var(cpus_allowed);
3386out_put_task:
3387	put_task_struct(p);
3388	return retval;
3389}
3390
3391static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
3392			     struct cpumask *new_mask)
3393{
3394	if (len < cpumask_size())
3395		cpumask_clear(new_mask);
3396	else if (len > cpumask_size())
3397		len = cpumask_size();
3398
3399	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
3400}
3401
3402/**
3403 * sys_sched_setaffinity - set the cpu affinity of a process
3404 * @pid: pid of the process
3405 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3406 * @user_mask_ptr: user-space pointer to the new cpu mask
3407 *
3408 * Return: 0 on success. An error code otherwise.
3409 */
3410SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
3411		unsigned long __user *, user_mask_ptr)
3412{
3413	cpumask_var_t new_mask;
3414	int retval;
3415
3416	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
3417		return -ENOMEM;
3418
3419	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
3420	if (retval == 0)
3421		retval = sched_setaffinity(pid, new_mask);
3422	free_cpumask_var(new_mask);
3423	return retval;
3424}
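
/*
 * From userspace this syscall is normally reached via the glibc wrapper
 * (editor's illustration, not part of this file):
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);
 *	sched_setaffinity(0, sizeof(set), &set);
 *
 * where pid 0 means the calling thread.
 */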
3425
3426long sched_getaffinity(pid_t pid, struct cpumask *mask)
3427{
3428	struct task_struct *p;
3429	unsigned long flags;
3430	int retval;
3431
3432	rcu_read_lock();
3433
3434	retval = -ESRCH;
3435	p = find_process_by_pid(pid);
3436	if (!p)
3437		goto out_unlock;
3438
3439	retval = security_task_getscheduler(p);
3440	if (retval)
3441		goto out_unlock;
3442
3443	raw_spin_lock_irqsave(&p->pi_lock, flags);
3444	cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
3445	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3446
3447out_unlock:
3448	rcu_read_unlock();
3449
3450	return retval;
3451}
3452
3453/**
3454 * sys_sched_getaffinity - get the cpu affinity of a process
3455 * @pid: pid of the process
3456 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3457 * @user_mask_ptr: user-space pointer to hold the current cpu mask
3458 *
3459 * Return: 0 on success. An error code otherwise.
3460 */
3461SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
3462		unsigned long __user *, user_mask_ptr)
3463{
3464	int ret;
3465	cpumask_var_t mask;
3466
3467	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
3468		return -EINVAL;
3469	if (len & (sizeof(unsigned long)-1))
3470		return -EINVAL;
3471
3472	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
3473		return -ENOMEM;
3474
3475	ret = sched_getaffinity(pid, mask);
3476	if (ret == 0) {
3477		size_t retlen = min_t(size_t, len, cpumask_size());
3478
3479		if (copy_to_user(user_mask_ptr, mask, retlen))
3480			ret = -EFAULT;
3481		else
3482			ret = retlen;
3483	}
3484	free_cpumask_var(mask);
3485
3486	return ret;
3487}
3488
3489/**
3490 * sys_sched_yield - yield the current processor to other threads.
3491 *
3492 * This function yields the current CPU to other tasks. If there are no
3493 * other threads running on this CPU then this function will return.
3494 *
3495 * Return: 0.
3496 */
3497SYSCALL_DEFINE0(sched_yield)
3498{
3499	struct rq *rq = this_rq_lock();
3500
3501	schedstat_inc(rq, yld_count);
3502	current->sched_class->yield_task(rq);
3503
3504	/*
3505	 * Since we are going to call schedule() anyway, there's
3506	 * no need to preempt or enable interrupts:
3507	 */
3508	__release(rq->lock);
3509	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
3510	do_raw_spin_unlock(&rq->lock);
3511	sched_preempt_enable_no_resched();
3512
3513	schedule();
3514
3515	return 0;
3516}
3517
3518static void __cond_resched(void)
3519{
3520	__preempt_count_add(PREEMPT_ACTIVE);
3521	__schedule();
3522	__preempt_count_sub(PREEMPT_ACTIVE);
3523}
3524
3525int __sched _cond_resched(void)
3526{
3527	if (should_resched()) {
3528		__cond_resched();
3529		return 1;
3530	}
3531	return 0;
3532}
3533EXPORT_SYMBOL(_cond_resched);
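
/*
 * Typical usage shape (editor's sketch; expensive_work() is
 * hypothetical): long-running loops in process context call
 * cond_resched() once per iteration so that !CONFIG_PREEMPT kernels
 * stay responsive:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		expensive_work(i);
 *		cond_resched();
 *	}
 */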
3534
3535/*
3536 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
3537 * call schedule, and on return reacquire the lock.
3538 *
3539 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
3540 * operations here to prevent schedule() from being called twice (once via
3541 * spin_unlock(), once by hand).
3542 */
3543int __cond_resched_lock(spinlock_t *lock)
3544{
3545	int resched = should_resched();
3546	int ret = 0;
3547
3548	lockdep_assert_held(lock);
3549
3550	if (spin_needbreak(lock) || resched) {
3551		spin_unlock(lock);
3552		if (resched)
3553			__cond_resched();
3554		else
3555			cpu_relax();
3556		ret = 1;
3557		spin_lock(lock);
3558	}
3559	return ret;
3560}
3561EXPORT_SYMBOL(__cond_resched_lock);
3562
3563int __sched __cond_resched_softirq(void)
3564{
3565	BUG_ON(!in_softirq());
3566
3567	if (should_resched()) {
3568		local_bh_enable();
3569		__cond_resched();
3570		local_bh_disable();
3571		return 1;
3572	}
3573	return 0;
3574}
3575EXPORT_SYMBOL(__cond_resched_softirq);
3576
3577/**
3578 * yield - yield the current processor to other threads.
3579 *
3580 * Do not ever use this function, there's a 99% chance you're doing it wrong.
3581 *
3582 * The scheduler is at all times free to pick the calling task as the most
3583 * eligible task to run; if removing the yield() call from your code breaks
3584 * it, it's already broken.
3585 *
3586 * Typical broken usage is:
3587 *
3588 * while (!event)
3589 * 	yield();
3590 *
3591 * where one assumes that yield() will let 'the other' process run that will
3592 * make event true. If the current task is a SCHED_FIFO task that will never
3593 * happen. Never use yield() as a progress guarantee!!
3594 *
3595 * If you want to use yield() to wait for something, use wait_event().
3596 * If you want to use yield() to be 'nice' for others, use cond_resched().
3597 * If you still want to use yield(), do not!
3598 */
3599void __sched yield(void)
3600{
3601	set_current_state(TASK_RUNNING);
3602	sys_sched_yield();
3603}
3604EXPORT_SYMBOL(yield);
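
/*
 * Editor's illustration of the correct replacement for the broken
 * pattern shown above -- waiter and waker meet on a waitqueue:
 *
 *	waiter:	wait_event(wq, event);
 *
 *	waker:	event = true;
 *		wake_up(&wq);
 */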
3605
3606/**
3607 * yield_to - yield the current processor to another thread in
3608 * your thread group, or accelerate that thread toward the
3609 * processor it's on.
3610 * @p: target task
3611 * @preempt: whether task preemption is allowed or not
3612 *
3613 * It's the caller's job to ensure that the target task struct
3614 * can't go away on us before we can do any checks.
3615 *
3616 * Return:
3617 *	true (>0) if we indeed boosted the target task.
3618 *	false (0) if we failed to boost the target.
3619 *	-ESRCH if there's no task to yield to.
3620 */
3621bool __sched yield_to(struct task_struct *p, bool preempt)
3622{
3623	struct task_struct *curr = current;
3624	struct rq *rq, *p_rq;
3625	unsigned long flags;
3626	int yielded = 0;
3627
3628	local_irq_save(flags);
3629	rq = this_rq();
3630
3631again:
3632	p_rq = task_rq(p);
3633	/*
3634	 * If we're the only runnable task on the rq and target rq also
3635	 * has only one task, there's absolutely no point in yielding.
3636	 */
3637	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
3638		yielded = -ESRCH;
3639		goto out_irq;
3640	}
3641
3642	double_rq_lock(rq, p_rq);
3643	while (task_rq(p) != p_rq) {
3644		double_rq_unlock(rq, p_rq);
3645		goto again;
3646	}
3647
3648	if (!curr->sched_class->yield_to_task)
3649		goto out_unlock;
3650
3651	if (curr->sched_class != p->sched_class)
3652		goto out_unlock;
3653
3654	if (task_running(p_rq, p) || p->state)
3655		goto out_unlock;
3656
3657	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
3658	if (yielded) {
3659		schedstat_inc(rq, yld_count);
3660		/*
3661		 * Make p's CPU reschedule; pick_next_entity takes care of
3662		 * fairness.
3663		 */
3664		if (preempt && rq != p_rq)
3665			resched_task(p_rq->curr);
3666	}
3667
3668out_unlock:
3669	double_rq_unlock(rq, p_rq);
3670out_irq:
3671	local_irq_restore(flags);
3672
3673	if (yielded > 0)
3674		schedule();
3675
3676	return yielded;
3677}
3678EXPORT_SYMBOL_GPL(yield_to);
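
/*
 * Illustrative caller pattern (editor's sketch): a directed yield to a
 * task believed to hold a contended resource, pinning it first because,
 * per the comment above, the target must not go away under us:
 *
 *	get_task_struct(target);
 *	yield_to(target, false);
 *	put_task_struct(target);
 */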
3679
3680/*
3681 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
3682 * that process accounting knows that this is a task in IO wait state.
3683 */
3684void __sched io_schedule(void)
3685{
3686	struct rq *rq = raw_rq();
3687
3688	delayacct_blkio_start();
3689	atomic_inc(&rq->nr_iowait);
3690	blk_flush_plug(current);
3691	current->in_iowait = 1;
3692	schedule();
3693	current->in_iowait = 0;
3694	atomic_dec(&rq->nr_iowait);
3695	delayacct_blkio_end();
3696}
3697EXPORT_SYMBOL(io_schedule);
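
/*
 * Typical usage shape (editor's sketch; req and request_done() are
 * hypothetical): block for an I/O completion while being accounted as
 * iowait:
 *
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait(&req->wq, &wait, TASK_UNINTERRUPTIBLE);
 *	if (!request_done(req))
 *		io_schedule();
 *	finish_wait(&req->wq, &wait);
 */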
3698
3699long __sched io_schedule_timeout(long timeout)
3700{
3701	struct rq *rq = raw_rq();
3702	long ret;
3703
3704	delayacct_blkio_start();
3705	atomic_inc(&rq->nr_iowait);
3706	blk_flush_plug(current);
3707	current->in_iowait = 1;
3708	ret = schedule_timeout(timeout);
3709	current->in_iowait = 0;
3710	atomic_dec(&rq->nr_iowait);
3711	delayacct_blkio_end();
3712	return ret;
3713}
3714
3715/**
3716 * sys_sched_get_priority_max - return maximum RT priority.
3717 * @policy: scheduling class.
3718 *
3719 * Return: On success, this syscall returns the maximum
3720 * rt_priority that can be used by a given scheduling class.
3721 * On failure, a negative error code is returned.
3722 */
3723SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
3724{
3725	int ret = -EINVAL;
3726
3727	switch (policy) {
3728	case SCHED_FIFO:
3729	case SCHED_RR:
3730		ret = MAX_USER_RT_PRIO-1;
3731		break;
3732	case SCHED_NORMAL:
3733	case SCHED_BATCH:
3734	case SCHED_IDLE:
3735		ret = 0;
3736		break;
3737	}
3738	return ret;
3739}
3740
3741/**
3742 * sys_sched_get_priority_min - return minimum RT priority.
3743 * @policy: scheduling class.
3744 *
3745 * Return: On success, this syscall returns the minimum
3746 * rt_priority that can be used by a given scheduling class.
3747 * On failure, a negative error code is returned.
3748 */
3749SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
3750{
3751	int ret = -EINVAL;
3752
3753	switch (policy) {
3754	case SCHED_FIFO:
3755	case SCHED_RR:
3756		ret = 1;
3757		break;
3758	case SCHED_NORMAL:
3759	case SCHED_BATCH:
3760	case SCHED_IDLE:
3761		ret = 0;
3762	}
3763	return ret;
3764}
3765
3766/**
3767 * sys_sched_rr_get_interval - return the default timeslice of a process.
3768 * @pid: pid of the process.
3769 * @interval: userspace pointer to the timeslice value.
3770 *
3771 * This syscall writes the default timeslice value of a given process
3772 * into the user-space timespec buffer. A value of '0' means infinity.
3773 *
3774 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
3775 * an error code.
3776 */
3777SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
3778		struct timespec __user *, interval)
3779{
3780	struct task_struct *p;
3781	unsigned int time_slice;
3782	unsigned long flags;
3783	struct rq *rq;
3784	int retval;
3785	struct timespec t;
3786
3787	if (pid < 0)
3788		return -EINVAL;
3789
3790	retval = -ESRCH;
3791	rcu_read_lock();
3792	p = find_process_by_pid(pid);
3793	if (!p)
3794		goto out_unlock;
3795
3796	retval = security_task_getscheduler(p);
3797	if (retval)
3798		goto out_unlock;
3799
3800	rq = task_rq_lock(p, &flags);
3801	time_slice = p->sched_class->get_rr_interval(rq, p);
3802	task_rq_unlock(rq, p, &flags);
3803
3804	rcu_read_unlock();
3805	jiffies_to_timespec(time_slice, &t);
3806	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
3807	return retval;
3808
3809out_unlock:
3810	rcu_read_unlock();
3811	return retval;
3812}
3813
3814static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
3815
3816void sched_show_task(struct task_struct *p)
3817{
3818	unsigned long free = 0;
3819	int ppid;
3820	unsigned state;
3821
3822	state = p->state ? __ffs(p->state) + 1 : 0;
3823	printk(KERN_INFO "%-15.15s %c", p->comm,
3824		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
3825#if BITS_PER_LONG == 32
3826	if (state == TASK_RUNNING)
3827		printk(KERN_CONT " running  ");
3828	else
3829		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
3830#else
3831	if (state == TASK_RUNNING)
3832		printk(KERN_CONT "  running task    ");
3833	else
3834		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
3835#endif
3836#ifdef CONFIG_DEBUG_STACK_USAGE
3837	free = stack_not_used(p);
3838#endif
3839	rcu_read_lock();
3840	ppid = task_pid_nr(rcu_dereference(p->real_parent));
3841	rcu_read_unlock();
3842	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
3843		task_pid_nr(p), ppid,
3844		(unsigned long)task_thread_info(p)->flags);
3845
3846	print_worker_info(KERN_INFO, p);
3847	show_stack(p, NULL);
3848}
3849
3850void show_state_filter(unsigned long state_filter)
3851{
3852	struct task_struct *g, *p;
3853
3854#if BITS_PER_LONG == 32
3855	printk(KERN_INFO
3856		"  task                PC stack   pid father\n");
3857#else
3858	printk(KERN_INFO
3859		"  task                        PC stack   pid father\n");
3860#endif
3861	rcu_read_lock();
3862	do_each_thread(g, p) {
3863		/*
3864		 * Reset the NMI-timeout; listing all tasks on a slow
3865		 * console might take a lot of time:
3866		 */
3867		touch_nmi_watchdog();
3868		if (!state_filter || (p->state & state_filter))
3869			sched_show_task(p);
3870	} while_each_thread(g, p);
3871
3872	touch_all_softlockup_watchdogs();
3873
3874#ifdef CONFIG_SCHED_DEBUG
3875	sysrq_sched_debug_show();
3876#endif
3877	rcu_read_unlock();
3878	/*
3879	 * Only show locks if all tasks are dumped:
3880	 */
3881	if (!state_filter)
3882		debug_show_all_locks();
3883}
3884
3885void init_idle_bootup_task(struct task_struct *idle)
3886{
3887	idle->sched_class = &idle_sched_class;
3888}
3889
3890/**
3891 * init_idle - set up an idle thread for a given CPU
3892 * @idle: task in question
3893 * @cpu: cpu the idle task belongs to
3894 *
3895 * NOTE: this function does not set the idle thread's NEED_RESCHED
3896 * flag, to make booting more robust.
3897 */
3898void init_idle(struct task_struct *idle, int cpu)
3899{
3900	struct rq *rq = cpu_rq(cpu);
3901	unsigned long flags;
3902
3903	raw_spin_lock_irqsave(&rq->lock, flags);
3904
3905	__sched_fork(0, idle);
3906	idle->state = TASK_RUNNING;
3907	idle->se.exec_start = sched_clock();
3908
3909	do_set_cpus_allowed(idle, cpumask_of(cpu));
3910	/*
3911	 * We have a chicken-and-egg problem: even though we are
3912	 * holding rq->lock, the cpu isn't yet set to this cpu, so the
3913	 * lockdep check in task_group() will fail.
3914	 *
3915	 * This is similar to the sched_fork() case; alternatively we could
3916	 * use task_rq_lock() here and obtain the other rq->lock.
3917	 *
3918	 * Silence PROVE_RCU
3919	 */
3920	rcu_read_lock();
3921	__set_task_cpu(idle, cpu);
3922	rcu_read_unlock();
3923
3924	rq->curr = rq->idle = idle;
3925#if defined(CONFIG_SMP)
3926	idle->on_cpu = 1;
3927#endif
3928	raw_spin_unlock_irqrestore(&rq->lock, flags);
3929
3930	/* Set the preempt count _outside_ the spinlocks! */
3931	init_idle_preempt_count(idle, cpu);
3932
3933	/*
3934	 * The idle tasks have their own, simple scheduling class:
3935	 */
3936	idle->sched_class = &idle_sched_class;
3937	ftrace_graph_init_idle_task(idle, cpu);
3938	vtime_init_idle(idle, cpu);
3939#if defined(CONFIG_SMP)
3940	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
3941#endif
3942}
3943
3944#ifdef CONFIG_SMP
3945void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
3946{
3947	if (p->sched_class && p->sched_class->set_cpus_allowed)
3948		p->sched_class->set_cpus_allowed(p, new_mask);
3949
3950	cpumask_copy(&p->cpus_allowed, new_mask);
3951	p->nr_cpus_allowed = cpumask_weight(new_mask);
3952}
3953
3954/*
3955 * This is how migration works:
3956 *
3957 * 1) we invoke migration_cpu_stop() on the target CPU using
3958 *    stop_one_cpu().
3959 * 2) stopper starts to run (implicitly forcing the migrated thread
3960 *    off the CPU)
3961 * 3) it checks whether the migrated task is still in the wrong runqueue.
3962 * 4) if it's in the wrong runqueue then the migration thread removes
3963 *    it and puts it into the right queue.
3964 * 5) stopper completes and stop_one_cpu() returns and the migration
3965 *    is done.
3966 */
3967
3968/*
3969 * Change a given task's CPU affinity. Migrate the thread to a
3970 * proper CPU and schedule it away if the CPU it's executing on
3971 * is removed from the allowed bitmask.
3972 *
3973 * NOTE: the caller must have a valid reference to the task, the
3974 * task must not exit() & deallocate itself prematurely. The
3975 * call is not atomic; no spinlocks may be held.
3976 */
3977int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3978{
3979	unsigned long flags;
3980	struct rq *rq;
3981	unsigned int dest_cpu;
3982	int ret = 0;
3983
3984	rq = task_rq_lock(p, &flags);
3985
3986	if (cpumask_equal(&p->cpus_allowed, new_mask))
3987		goto out;
3988
3989	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
3990		ret = -EINVAL;
3991		goto out;
3992	}
3993
3994	do_set_cpus_allowed(p, new_mask);
3995
3996	/* Can the task run on the task's current CPU? If so, we're done */
3997	if (cpumask_test_cpu(task_cpu(p), new_mask))
3998		goto out;
3999
4000	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
4001	if (p->on_rq) {
4002		struct migration_arg arg = { p, dest_cpu };
4003		/* Need help from migration thread: drop lock and wait. */
4004		task_rq_unlock(rq, p, &flags);
4005		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
4006		tlb_migrate_finish(p->mm);
4007		return 0;
4008	}
4009out:
4010	task_rq_unlock(rq, p, &flags);
4011
4012	return ret;
4013}
4014EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
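
/*
 * Illustrative use (editor's sketch): pinning a task to a single CPU
 * and later releasing it again:
 *
 *	set_cpus_allowed_ptr(p, cpumask_of(cpu));
 *	...
 *	set_cpus_allowed_ptr(p, cpu_possible_mask);
 */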
4015
4016/*
4017 * Move a (non-current) task off this cpu, onto the dest cpu. We're doing
4018 * this because either it can't run here any more (set_cpus_allowed()
4019 * moved it away from this CPU, or the CPU is going down), or because we're
4020 * attempting to rebalance this task on exec (sched_exec).
4021 *
4022 * So we race with normal scheduler movements, but that's OK, as long
4023 * as the task is no longer on this CPU.
4024 *
4025 * Returns non-zero if task was successfully migrated.
4026 */
4027static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
4028{
4029	struct rq *rq_dest, *rq_src;
4030	int ret = 0;
4031
4032	if (unlikely(!cpu_active(dest_cpu)))
4033		return ret;
4034
4035	rq_src = cpu_rq(src_cpu);
4036	rq_dest = cpu_rq(dest_cpu);
4037
4038	raw_spin_lock(&p->pi_lock);
4039	double_rq_lock(rq_src, rq_dest);
4040	/* Already moved. */
4041	if (task_cpu(p) != src_cpu)
4042		goto done;
4043	/* Affinity changed (again). */
4044	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
4045		goto fail;
4046
4047	/*
4048	 * If we're not on a rq, the next wake-up will ensure we're
4049	 * placed properly.
4050	 */
4051	if (p->on_rq) {
4052		dequeue_task(rq_src, p, 0);
4053		set_task_cpu(p, dest_cpu);
4054		enqueue_task(rq_dest, p, 0);
4055		check_preempt_curr(rq_dest, p, 0);
4056	}
4057done:
4058	ret = 1;
4059fail:
4060	double_rq_unlock(rq_src, rq_dest);
4061	raw_spin_unlock(&p->pi_lock);
4062	return ret;
4063}
4064
4065#ifdef CONFIG_NUMA_BALANCING
4066/* Migrate current task p to target_cpu */
4067int migrate_task_to(struct task_struct *p, int target_cpu)
4068{
4069	struct migration_arg arg = { p, target_cpu };
4070	int curr_cpu = task_cpu(p);
4071
4072	if (curr_cpu == target_cpu)
4073		return 0;
4074
4075	if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
4076		return -EINVAL;
4077
4078	/* TODO: This is not properly updating schedstats */
4079
4080	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
4081}
4082
4083/*
4084 * Requeue a task on a given node and accurately track the number of NUMA
4085 * tasks on the runqueues
4086 */
4087void sched_setnuma(struct task_struct *p, int nid)
4088{
4089	struct rq *rq;
4090	unsigned long flags;
4091	bool on_rq, running;
4092
4093	rq = task_rq_lock(p, &flags);
4094	on_rq = p->on_rq;
4095	running = task_current(rq, p);
4096
4097	if (on_rq)
4098		dequeue_task(rq, p, 0);
4099	if (running)
4100		p->sched_class->put_prev_task(rq, p);
4101
4102	p->numa_preferred_nid = nid;
4103
4104	if (running)
4105		p->sched_class->set_curr_task(rq);
4106	if (on_rq)
4107		enqueue_task(rq, p, 0);
4108	task_rq_unlock(rq, p, &flags);
4109}
4110#endif
4111
4112/*
4113 * migration_cpu_stop - this will be executed by a highprio stopper thread
4114 * and performs thread migration by bumping the thread off the CPU
4115 * then 'pushing' it onto another runqueue.
4116 */
4117static int migration_cpu_stop(void *data)
4118{
4119	struct migration_arg *arg = data;
4120
4121	/*
4122	 * The original target cpu might have gone down and we might
4123	 * be on another cpu but it doesn't matter.
4124	 */
4125	local_irq_disable();
4126	__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
4127	local_irq_enable();
4128	return 0;
4129}
4130
4131#ifdef CONFIG_HOTPLUG_CPU
4132
4133/*
4134 * Ensures that the idle task is using init_mm right before its cpu goes
4135 * offline.
4136 */
4137void idle_task_exit(void)
4138{
4139	struct mm_struct *mm = current->active_mm;
4140
4141	BUG_ON(cpu_online(smp_processor_id()));
4142
4143	if (mm != &init_mm)
4144		switch_mm(mm, &init_mm, current);
4145	mmdrop(mm);
4146}
4147
4148/*
4149 * Since this CPU is going 'away' for a while, fold any nr_active delta
4150 * we might have. Assumes we're called after migrate_tasks() so that the
4151 * nr_active count is stable.
4152 *
4153 * Also see the comment "Global load-average calculations".
4154 */
4155static void calc_load_migrate(struct rq *rq)
4156{
4157	long delta = calc_load_fold_active(rq);
4158	if (delta)
4159		atomic_long_add(delta, &calc_load_tasks);
4160}
4161
4162/*
4163 * Migrate all tasks from the rq; sleeping tasks will be migrated by
4164 * try_to_wake_up()->select_task_rq().
4165 *
4166 * Called with rq->lock held even though we're in stop_machine() and
4167 * there's no concurrency possible; we hold the required locks anyway
4168 * because of lock validation efforts.
4169 */
4170static void migrate_tasks(unsigned int dead_cpu)
4171{
4172	struct rq *rq = cpu_rq(dead_cpu);
4173	struct task_struct *next, *stop = rq->stop;
4174	int dest_cpu;
4175
4176	/*
4177	 * Fudge the rq selection such that the below task selection loop
4178	 * doesn't get stuck on the currently eligible stop task.
4179	 *
4180	 * We're currently inside stop_machine() and the rq is either stuck
4181	 * in the stop_machine_cpu_stop() loop, or we're executing this code;
4182	 * either way we should never end up calling schedule() until we're
4183	 * done here.
4184	 */
4185	rq->stop = NULL;
4186
4187	/*
4188	 * put_prev_task() and pick_next_task() sched
4189	 * class methods both need to have an up-to-date
4190	 * value of rq->clock[_task]
4191	 */
4192	update_rq_clock(rq);
4193
4194	for ( ; ; ) {
4195		/*
4196		 * There's this thread running; bail when that's the only
4197		 * remaining thread.
4198		 */
4199		if (rq->nr_running == 1)
4200			break;
4201
4202		next = pick_next_task(rq);
4203		BUG_ON(!next);
4204		next->sched_class->put_prev_task(rq, next);
4205
4206		/* Find suitable destination for @next, with force if needed. */
4207		dest_cpu = select_fallback_rq(dead_cpu, next);
4208		raw_spin_unlock(&rq->lock);
4209
4210		__migrate_task(next, dead_cpu, dest_cpu);
4211
4212		raw_spin_lock(&rq->lock);
4213	}
4214
4215	rq->stop = stop;
4216}
4217
4218#endif /* CONFIG_HOTPLUG_CPU */
4219
4220#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
4221
4222static struct ctl_table sd_ctl_dir[] = {
4223	{
4224		.procname	= "sched_domain",
4225		.mode		= 0555,
4226	},
4227	{}
4228};
4229
4230static struct ctl_table sd_ctl_root[] = {
4231	{
4232		.procname	= "kernel",
4233		.mode		= 0555,
4234		.child		= sd_ctl_dir,
4235	},
4236	{}
4237};
4238
4239static struct ctl_table *sd_alloc_ctl_entry(int n)
4240{
4241	struct ctl_table *entry =
4242		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
4243
4244	return entry;
4245}
4246
4247static void sd_free_ctl_entry(struct ctl_table **tablep)
4248{
4249	struct ctl_table *entry;
4250
4251	/*
4252	 * In the intermediate directories, both the child directory and
4253	 * procname are dynamically allocated and could fail but the mode
4254	 * will always be set. In the lowest directory the names are
4255	 * static strings and all have proc handlers.
4256	 */
4257	for (entry = *tablep; entry->mode; entry++) {
4258		if (entry->child)
4259			sd_free_ctl_entry(&entry->child);
4260		if (entry->proc_handler == NULL)
4261			kfree(entry->procname);
4262	}
4263
4264	kfree(*tablep);
4265	*tablep = NULL;
4266}
4267
4268static int min_load_idx = 0;
4269static int max_load_idx = CPU_LOAD_IDX_MAX-1;
4270
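/*
 * Fill in a single sched_domain ctl_table entry; for load-index entries,
 * extra1/extra2 bound the accepted values to [min_load_idx, max_load_idx].
 */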
4271static void
4272set_table_entry(struct ctl_table *entry,
4273		const char *procname, void *data, int maxlen,
4274		umode_t mode, proc_handler *proc_handler,
4275		bool load_idx)
4276{
4277	entry->procname = procname;
4278	entry->data = data;
4279	entry->maxlen = maxlen;
4280	entry->mode = mode;
4281	entry->proc_handler = proc_handler;
4282
4283	if (load_idx) {
4284		entry->extra1 = &min_load_idx;
4285		entry->extra2 = &max_load_idx;
4286	}
4287}
4288
4289static struct ctl_table *
4290sd_alloc_ctl_domain_table(struct sched_domain *sd)
4291{
4292	struct ctl_table *table = sd_alloc_ctl_entry(13);
4293
4294	if (table == NULL)
4295		return NULL;
4296
4297	set_table_entry(&table[0], "min_interval", &sd->min_interval,
4298		sizeof(long), 0644, proc_doulongvec_minmax, false);
4299	set_table_entry(&table[1], "max_interval", &sd->max_interval,
4300		sizeof(long), 0644, proc_doulongvec_minmax, false);
4301	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
4302		sizeof(int), 0644, proc_dointvec_minmax, true);
4303	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
4304		sizeof(int), 0644, proc_dointvec_minmax, true);
4305	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
4306		sizeof(int), 0644, proc_dointvec_minmax, true);
4307	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
4308		sizeof(int), 0644, proc_dointvec_minmax, true);
4309	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
4310		sizeof(int), 0644, proc_dointvec_minmax, true);
4311	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
4312		sizeof(int), 0644, proc_dointvec_minmax, false);
4313	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
4314		sizeof(int), 0644, proc_dointvec_minmax, false);
4315	set_table_entry(&table[9], "cache_nice_tries",
4316		&sd->cache_nice_tries,
4317		sizeof(int), 0644, proc_dointvec_minmax, false);
4318	set_table_entry(&table[10], "flags", &sd->flags,
4319		sizeof(int), 0644, proc_dointvec_minmax, false);
4320	set_table_entry(&table[11], "name", sd->name,
4321		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
4322	/* &table[12] is terminator */
4323
4324	return table;
4325}
4326
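/*
 * Build one "domain%d" sub-table for every sched_domain attached to @cpu.
 */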
4327static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
4328{
4329	struct ctl_table *entry, *table;
4330	struct sched_domain *sd;
4331	int domain_num = 0, i;
4332	char buf[32];
4333
4334	for_each_domain(cpu, sd)
4335		domain_num++;
4336	entry = table = sd_alloc_ctl_entry(domain_num + 1);
4337	if (table == NULL)
4338		return NULL;
4339
4340	i = 0;
4341	for_each_domain(cpu, sd) {
4342		snprintf(buf, 32, "domain%d", i);
4343		entry->procname = kstrdup(buf, GFP_KERNEL);
4344		entry->mode = 0555;
4345		entry->child = sd_alloc_ctl_domain_table(sd);
4346		entry++;
4347		i++;
4348	}
4349	return table;
4350}
4351
4352static struct ctl_table_header *sd_sysctl_header;
4353static void register_sched_domain_sysctl(void)
4354{
4355	int i, cpu_num = num_possible_cpus();
4356	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
4357	char buf[32];
4358
4359	WARN_ON(sd_ctl_dir[0].child);
4360	sd_ctl_dir[0].child = entry;
4361
4362	if (entry == NULL)
4363		return;
4364
4365	for_each_possible_cpu(i) {
4366		snprintf(buf, 32, "cpu%d", i);
4367		entry->procname = kstrdup(buf, GFP_KERNEL);
4368		entry->mode = 0555;
4369		entry->child = sd_alloc_ctl_cpu_table(i);
4370		entry++;
4371	}
4372
4373	WARN_ON(sd_sysctl_header);
4374	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
4375}
4376
4377/* may be called multiple times per register */
4378static void unregister_sched_domain_sysctl(void)
4379{
4380	if (sd_sysctl_header)
4381		unregister_sysctl_table(sd_sysctl_header);
4382	sd_sysctl_header = NULL;
4383	if (sd_ctl_dir[0].child)
4384		sd_free_ctl_entry(&sd_ctl_dir[0].child);
4385}
4386#else
4387static void register_sched_domain_sysctl(void)
4388{
4389}
4390static void unregister_sched_domain_sysctl(void)
4391{
4392}
4393#endif
4394
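/*
 * Mark the runqueue online in its root domain and give every scheduling
 * class a chance to update its per-rq state.
 */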
4395static void set_rq_online(struct rq *rq)
4396{
4397	if (!rq->online) {
4398		const struct sched_class *class;
4399
4400		cpumask_set_cpu(rq->cpu, rq->rd->online);
4401		rq->online = 1;
4402
4403		for_each_class(class) {
4404			if (class->rq_online)
4405				class->rq_online(rq);
4406		}
4407	}
4408}
4409
4410static void set_rq_offline(struct rq *rq)
4411{
4412	if (rq->online) {
4413		const struct sched_class *class;
4414
4415		for_each_class(class) {
4416			if (class->rq_offline)
4417				class->rq_offline(rq);
4418		}
4419
4420		cpumask_clear_cpu(rq->cpu, rq->rd->online);
4421		rq->online = 0;
4422	}
4423}
4424
4425/*
4426 * migration_call - callback that gets triggered when a CPU is added.
4427 * Here we can start up the necessary migration thread for the new CPU.
4428 */
4429static int
4430migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
4431{
4432	int cpu = (long)hcpu;
4433	unsigned long flags;
4434	struct rq *rq = cpu_rq(cpu);
4435
4436	switch (action & ~CPU_TASKS_FROZEN) {
4437
4438	case CPU_UP_PREPARE:
4439		rq->calc_load_update = calc_load_update;
4440		break;
4441
4442	case CPU_ONLINE:
4443		/* Update our root-domain */
4444		raw_spin_lock_irqsave(&rq->lock, flags);
4445		if (rq->rd) {
4446			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
4447
4448			set_rq_online(rq);
4449		}
4450		raw_spin_unlock_irqrestore(&rq->lock, flags);
4451		break;
4452
4453#ifdef CONFIG_HOTPLUG_CPU
4454	case CPU_DYING:
4455		sched_ttwu_pending();
4456		/* Update our root-domain */
4457		raw_spin_lock_irqsave(&rq->lock, flags);
4458		if (rq->rd) {
4459			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
4460			set_rq_offline(rq);
4461		}
4462		migrate_tasks(cpu);
4463		BUG_ON(rq->nr_running != 1); /* the migration thread */
4464		raw_spin_unlock_irqrestore(&rq->lock, flags);
4465		break;
4466
4467	case CPU_DEAD:
4468		calc_load_migrate(rq);
4469		break;
4470#endif
4471	}
4472
4473	update_max_interval();
4474
4475	return NOTIFY_OK;
4476}
4477
4478/*
4479 * Register at high priority so that task migration (migrate_all_tasks)
4480 * happens before everything else.  This has to be lower priority than
4481 * the notifier in the perf_event subsystem, though.
4482 */
4483static struct notifier_block migration_notifier = {
4484	.notifier_call = migration_call,
4485	.priority = CPU_PRI_MIGRATION,
4486};
4487
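/*
 * Mark a CPU active when it is starting up, or again when a failed
 * CPU-down operation is rolled back.
 */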
4488static int sched_cpu_active(struct notifier_block *nfb,
4489				      unsigned long action, void *hcpu)
4490{
4491	switch (action & ~CPU_TASKS_FROZEN) {
4492	case CPU_STARTING:
4493	case CPU_DOWN_FAILED:
4494		set_cpu_active((long)hcpu, true);
4495		return NOTIFY_OK;
4496	default:
4497		return NOTIFY_DONE;
4498	}
4499}
4500
4501static int sched_cpu_inactive(struct notifier_block *nfb,
4502					unsigned long action, void *hcpu)
4503{
4504	switch (action & ~CPU_TASKS_FROZEN) {
4505	case CPU_DOWN_PREPARE:
4506		set_cpu_active((long)hcpu, false);
4507		return NOTIFY_OK;
4508	default:
4509		return NOTIFY_DONE;
4510	}
4511}
4512
4513static int __init migration_init(void)
4514{
4515	void *cpu = (void *)(long)smp_processor_id();
4516	int err;
4517
4518	/* Initialize migration for the boot CPU */
4519	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
4520	BUG_ON(err == NOTIFY_BAD);
4521	migration_call(&migration_notifier, CPU_ONLINE, cpu);
4522	register_cpu_notifier(&migration_notifier);
4523
4524	/* Register cpu active notifiers */
4525	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
4526	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
4527
4528	return 0;
4529}
4530early_initcall(migration_init);
4531#endif
4532
4533#ifdef CONFIG_SMP
4534
4535static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
4536
4537#ifdef CONFIG_SCHED_DEBUG
4538
4539static __read_mostly int sched_debug_enabled;
4540
4541static int __init sched_debug_setup(char *str)
4542{
4543	sched_debug_enabled = 1;
4544
4545	return 0;
4546}
4547early_param("sched_debug", sched_debug_setup);
4548
4549static inline bool sched_debug(void)
4550{
4551	return sched_debug_enabled;
4552}
4553
4554static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
4555				  struct cpumask *groupmask)
4556{
4557	struct sched_group *group = sd->groups;
4558	char str[256];
4559
4560	cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
4561	cpumask_clear(groupmask);
4562
4563	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
4564
4565	if (!(sd->flags & SD_LOAD_BALANCE)) {
4566		printk("does not load-balance\n");
4567		if (sd->parent)
4568			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
4569					" has parent");
4570		return -1;
4571	}
4572
4573	printk(KERN_CONT "span %s level %s\n", str, sd->name);
4574
4575	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
4576		printk(KERN_ERR "ERROR: domain->span does not contain "
4577				"CPU%d\n", cpu);
4578	}
4579	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
4580		printk(KERN_ERR "ERROR: domain->groups does not contain"
4581				" CPU%d\n", cpu);
4582	}
4583
4584	printk(KERN_DEBUG "%*s groups:", level + 1, "");
4585	do {
4586		if (!group) {
4587			printk("\n");
4588			printk(KERN_ERR "ERROR: group is NULL\n");
4589			break;
4590		}
4591
4592		/*
4593		 * Even though we initialize ->power to something semi-sane,
4594		 * we leave power_orig unset. This allows us to detect if
4595		 * domain iteration is still funny without causing /0 traps.
4596		 */
4597		if (!group->sgp->power_orig) {
4598			printk(KERN_CONT "\n");
4599			printk(KERN_ERR "ERROR: domain->cpu_power not "
4600					"set\n");
4601			break;
4602		}
4603
4604		if (!cpumask_weight(sched_group_cpus(group))) {
4605			printk(KERN_CONT "\n");
4606			printk(KERN_ERR "ERROR: empty group\n");
4607			break;
4608		}
4609
4610		if (!(sd->flags & SD_OVERLAP) &&
4611		    cpumask_intersects(groupmask, sched_group_cpus(group))) {
4612			printk(KERN_CONT "\n");
4613			printk(KERN_ERR "ERROR: repeated CPUs\n");
4614			break;
4615		}
4616
4617		cpumask_or(groupmask, groupmask, sched_group_cpus(group));
4618
4619		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
4620
4621		printk(KERN_CONT " %s", str);
4622		if (group->sgp->power != SCHED_POWER_SCALE) {
4623			printk(KERN_CONT " (cpu_power = %d)",
4624				group->sgp->power);
4625		}
4626
4627		group = group->next;
4628	} while (group != sd->groups);
4629	printk(KERN_CONT "\n");
4630
4631	if (!cpumask_equal(sched_domain_span(sd), groupmask))
4632		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
4633
4634	if (sd->parent &&
4635	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
4636		printk(KERN_ERR "ERROR: parent span is not a superset "
4637			"of domain->span\n");
4638	return 0;
4639}
4640
4641static void sched_domain_debug(struct sched_domain *sd, int cpu)
4642{
4643	int level = 0;
4644
4645	if (!sched_debug_enabled)
4646		return;
4647
4648	if (!sd) {
4649		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
4650		return;
4651	}
4652
4653	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
4654
4655	for (;;) {
4656		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
4657			break;
4658		level++;
4659		sd = sd->parent;
4660		if (!sd)
4661			break;
4662	}
4663}
4664#else /* !CONFIG_SCHED_DEBUG */
4665# define sched_domain_debug(sd, cpu) do { } while (0)
4666static inline bool sched_debug(void)
4667{
4668	return false;
4669}
4670#endif /* CONFIG_SCHED_DEBUG */
4671
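/*
 * A domain is degenerate when it cannot contribute any useful balancing,
 * e.g. because it spans a single CPU or has only a single group; such
 * domains are pruned in cpu_attach_domain().
 */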
4672static int sd_degenerate(struct sched_domain *sd)
4673{
4674	if (cpumask_weight(sched_domain_span(sd)) == 1)
4675		return 1;
4676
4677	/* Following flags need at least 2 groups */
4678	if (sd->flags & (SD_LOAD_BALANCE |
4679			 SD_BALANCE_NEWIDLE |
4680			 SD_BALANCE_FORK |
4681			 SD_BALANCE_EXEC |
4682			 SD_SHARE_CPUPOWER |
4683			 SD_SHARE_PKG_RESOURCES)) {
4684		if (sd->groups != sd->groups->next)
4685			return 0;
4686	}
4687
4688	/* Following flags don't use groups */
4689	if (sd->flags & (SD_WAKE_AFFINE))
4690		return 0;
4691
4692	return 1;
4693}
4694
4695static int
4696sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
4697{
4698	unsigned long cflags = sd->flags, pflags = parent->flags;
4699
4700	if (sd_degenerate(parent))
4701		return 1;
4702
4703	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
4704		return 0;
4705
4706	/* Flags needing groups don't count if only 1 group in parent */
4707	if (parent->groups == parent->groups->next) {
4708		pflags &= ~(SD_LOAD_BALANCE |
4709				SD_BALANCE_NEWIDLE |
4710				SD_BALANCE_FORK |
4711				SD_BALANCE_EXEC |
4712				SD_SHARE_CPUPOWER |
4713				SD_SHARE_PKG_RESOURCES |
4714				SD_PREFER_SIBLING);
4715		if (nr_node_ids == 1)
4716			pflags &= ~SD_SERIALIZE;
4717	}
4718	if (~cflags & pflags)
4719		return 0;
4720
4721	return 1;
4722}
4723
4724static void free_rootdomain(struct rcu_head *rcu)
4725{
4726	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
4727
4728	cpupri_cleanup(&rd->cpupri);
4729	free_cpumask_var(rd->rto_mask);
4730	free_cpumask_var(rd->online);
4731	free_cpumask_var(rd->span);
4732	kfree(rd);
4733}
4734
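/*
 * Attach @rq to the root domain @rd, taking a reference on it and dropping
 * the reference on the old root domain (freed via RCU once unused).
 */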
4735static void rq_attach_root(struct rq *rq, struct root_domain *rd)
4736{
4737	struct root_domain *old_rd = NULL;
4738	unsigned long flags;
4739
4740	raw_spin_lock_irqsave(&rq->lock, flags);
4741
4742	if (rq->rd) {
4743		old_rd = rq->rd;
4744
4745		if (cpumask_test_cpu(rq->cpu, old_rd->online))
4746			set_rq_offline(rq);
4747
4748		cpumask_clear_cpu(rq->cpu, old_rd->span);
4749
4750		/*
4751		 * If we don't want to free the old_rd yet then
4752		 * set old_rd to NULL to skip the freeing later
4753		 * in this function:
4754		 */
4755		if (!atomic_dec_and_test(&old_rd->refcount))
4756			old_rd = NULL;
4757	}
4758
4759	atomic_inc(&rd->refcount);
4760	rq->rd = rd;
4761
4762	cpumask_set_cpu(rq->cpu, rd->span);
4763	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
4764		set_rq_online(rq);
4765
4766	raw_spin_unlock_irqrestore(&rq->lock, flags);
4767
4768	if (old_rd)
4769		call_rcu_sched(&old_rd->rcu, free_rootdomain);
4770}
4771
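/*
 * Allocate the cpumasks and cpupri state of a root domain;
 * returns -ENOMEM if any allocation fails.
 */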
4772static int init_rootdomain(struct root_domain *rd)
4773{
4774	memset(rd, 0, sizeof(*rd));
4775
4776	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
4777		goto out;
4778	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
4779		goto free_span;
4780	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
4781		goto free_online;
4782
4783	if (cpupri_init(&rd->cpupri) != 0)
4784		goto free_rto_mask;
4785	return 0;
4786
4787free_rto_mask:
4788	free_cpumask_var(rd->rto_mask);
4789free_online:
4790	free_cpumask_var(rd->online);
4791free_span:
4792	free_cpumask_var(rd->span);
4793out:
4794	return -ENOMEM;
4795}
4796
4797/*
4798 * By default the system creates a single root-domain with all cpus as
4799 * members (mimicking the global state we have today).
4800 */
4801struct root_domain def_root_domain;
4802
4803static void init_defrootdomain(void)
4804{
4805	init_rootdomain(&def_root_domain);
4806
4807	atomic_set(&def_root_domain.refcount, 1);
4808}
4809
4810static struct root_domain *alloc_rootdomain(void)
4811{
4812	struct root_domain *rd;
4813
4814	rd = kmalloc(sizeof(*rd), GFP_KERNEL);
4815	if (!rd)
4816		return NULL;
4817
4818	if (init_rootdomain(rd) != 0) {
4819		kfree(rd);
4820		return NULL;
4821	}
4822
4823	return rd;
4824}
4825
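/*
 * Walk the circular list of groups and free each one; the shared
 * sched_group_power is freed only when @free_sgp is set and its
 * reference count drops to zero.
 */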
4826static void free_sched_groups(struct sched_group *sg, int free_sgp)
4827{
4828	struct sched_group *tmp, *first;
4829
4830	if (!sg)
4831		return;
4832
4833	first = sg;
4834	do {
4835		tmp = sg->next;
4836
4837		if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
4838			kfree(sg->sgp);
4839
4840		kfree(sg);
4841		sg = tmp;
4842	} while (sg != first);
4843}
4844
4845static void free_sched_domain(struct rcu_head *rcu)
4846{
4847	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
4848
4849	/*
4850	 * If it's an overlapping domain it has private groups, iterate and
4851	 * nuke them all.
4852	 */
4853	if (sd->flags & SD_OVERLAP) {
4854		free_sched_groups(sd->groups, 1);
4855	} else if (atomic_dec_and_test(&sd->groups->ref)) {
4856		kfree(sd->groups->sgp);
4857		kfree(sd->groups);
4858	}
4859	kfree(sd);
4860}
4861
4862static void destroy_sched_domain(struct sched_domain *sd, int cpu)
4863{
4864	call_rcu(&sd->rcu, free_sched_domain);
4865}
4866
4867static void destroy_sched_domains(struct sched_domain *sd, int cpu)
4868{
4869	for (; sd; sd = sd->parent)
4870		destroy_sched_domain(sd, cpu);
4871}
4872
4873/*
4874 * Keep a special pointer to the highest sched_domain that has
4875 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
4876 * allows us to avoid some pointer chasing in select_idle_sibling().
4877 *
4878 * Also keep a unique ID per domain (we use the first cpu number in
4879 * the cpumask of the domain), this allows us to quickly tell if
4880 * two cpus are in the same cache domain, see cpus_share_cache().
4881 */
4882DEFINE_PER_CPU(struct sched_domain *, sd_llc);
4883DEFINE_PER_CPU(int, sd_llc_size);
4884DEFINE_PER_CPU(int, sd_llc_id);
4885DEFINE_PER_CPU(struct sched_domain *, sd_numa);
4886
4887static void update_top_cache_domain(int cpu)
4888{
4889	struct sched_domain *sd;
4890	int id = cpu;
4891	int size = 1;
4892
4893	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
4894	if (sd) {
4895		id = cpumask_first(sched_domain_span(sd));
4896		size = cpumask_weight(sched_domain_span(sd));
4897	}
4898
4899	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
4900	per_cpu(sd_llc_size, cpu) = size;
4901	per_cpu(sd_llc_id, cpu) = id;
4902
4903	sd = lowest_flag_domain(cpu, SD_NUMA);
4904	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
4905}
4906
4907/*
4908 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
4909 * hold the hotplug lock.
4910 */
4911static void
4912cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
4913{
4914	struct rq *rq = cpu_rq(cpu);
4915	struct sched_domain *tmp;
4916
4917	/* Remove the sched domains which do not contribute to scheduling. */
4918	for (tmp = sd; tmp; ) {
4919		struct sched_domain *parent = tmp->parent;
4920		if (!parent)
4921			break;
4922
4923		if (sd_parent_degenerate(tmp, parent)) {
4924			tmp->parent = parent->parent;
4925			if (parent->parent)
4926				parent->parent->child = tmp;
4927			/*
4928			 * Transfer SD_PREFER_SIBLING down in case of a
4929			 * degenerate parent; the spans match for this
4930			 * so the property transfers.
4931			 */
4932			if (parent->flags & SD_PREFER_SIBLING)
4933				tmp->flags |= SD_PREFER_SIBLING;
4934			destroy_sched_domain(parent, cpu);
4935		} else
4936			tmp = tmp->parent;
4937	}
4938
4939	if (sd && sd_degenerate(sd)) {
4940		tmp = sd;
4941		sd = sd->parent;
4942		destroy_sched_domain(tmp, cpu);
4943		if (sd)
4944			sd->child = NULL;
4945	}
4946
4947	sched_domain_debug(sd, cpu);
4948
4949	rq_attach_root(rq, rd);
4950	tmp = rq->sd;
4951	rcu_assign_pointer(rq->sd, sd);
4952	destroy_sched_domains(tmp, cpu);
4953
4954	update_top_cache_domain(cpu);
4955}
4956
4957/* cpus with isolated domains */
4958static cpumask_var_t cpu_isolated_map;
4959
4960/* Setup the mask of cpus configured for isolated domains */
4961static int __init isolated_cpu_setup(char *str)
4962{
4963	alloc_bootmem_cpumask_var(&cpu_isolated_map);
4964	cpulist_parse(str, cpu_isolated_map);
4965	return 1;
4966}
4967
4968__setup("isolcpus=", isolated_cpu_setup);
4969
4970static const struct cpumask *cpu_cpu_mask(int cpu)
4971{
4972	return cpumask_of_node(cpu_to_node(cpu));
4973}
4974
4975struct sd_data {
4976	struct sched_domain **__percpu sd;
4977	struct sched_group **__percpu sg;
4978	struct sched_group_power **__percpu sgp;
4979};
4980
4981struct s_data {
4982	struct sched_domain ** __percpu sd;
4983	struct root_domain	*rd;
4984};
4985
4986enum s_alloc {
4987	sa_rootdomain,
4988	sa_sd,
4989	sa_sd_storage,
4990	sa_none,
4991};
4992
4993struct sched_domain_topology_level;
4994
4995typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
4996typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
4997
4998#define SDTL_OVERLAP	0x01
4999
5000struct sched_domain_topology_level {
5001	sched_domain_init_f init;
5002	sched_domain_mask_f mask;
5003	int		    flags;
5004	int		    numa_level;
5005	struct sd_data      data;
5006};
5007
5008/*
5009 * Build an iteration mask that can exclude certain CPUs from the upwards
5010 * domain traversal.
5011 *
5012 * Asymmetric node setups can result in situations where the domain tree is of
5013 * unequal depth; make sure to skip domains that already cover the entire
5014 * range.
5015 *
5016 * In that case build_sched_domains() will have terminated the iteration early
5017 * and our sibling sd spans will be empty. Domains should always include the
5018 * cpu they're built on, so check that.
5019 *
5020 */
5021static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
5022{
5023	const struct cpumask *span = sched_domain_span(sd);
5024	struct sd_data *sdd = sd->private;
5025	struct sched_domain *sibling;
5026	int i;
5027
5028	for_each_cpu(i, span) {
5029		sibling = *per_cpu_ptr(sdd->sd, i);
5030		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
5031			continue;
5032
5033		cpumask_set_cpu(i, sched_group_mask(sg));
5034	}
5035}
5036
5037/*
5038 * Return the canonical balance cpu for this group, this is the first cpu
5039 * of this group that's also in the iteration mask.
5040 */
5041int group_balance_cpu(struct sched_group *sg)
5042{
5043	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
5044}
5045
5046static int
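/*
 * Build the (possibly overlapping) groups of an SD_OVERLAP domain and link
 * them into a circular list headed by sd->groups.
 */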
5047build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5048{
5049	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
5050	const struct cpumask *span = sched_domain_span(sd);
5051	struct cpumask *covered = sched_domains_tmpmask;
5052	struct sd_data *sdd = sd->private;
5053	struct sched_domain *child;
5054	int i;
5055
5056	cpumask_clear(covered);
5057
5058	for_each_cpu(i, span) {
5059		struct cpumask *sg_span;
5060
5061		if (cpumask_test_cpu(i, covered))
5062			continue;
5063
5064		child = *per_cpu_ptr(sdd->sd, i);
5065
5066		/* See the comment near build_group_mask(). */
5067		if (!cpumask_test_cpu(i, sched_domain_span(child)))
5068			continue;
5069
5070		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
5071				GFP_KERNEL, cpu_to_node(cpu));
5072
5073		if (!sg)
5074			goto fail;
5075
5076		sg_span = sched_group_cpus(sg);
5077		if (child->child) {
5078			child = child->child;
5079			cpumask_copy(sg_span, sched_domain_span(child));
5080		} else
5081			cpumask_set_cpu(i, sg_span);
5082
5083		cpumask_or(covered, covered, sg_span);
5084
5085		sg->sgp = *per_cpu_ptr(sdd->sgp, i);
5086		if (atomic_inc_return(&sg->sgp->ref) == 1)
5087			build_group_mask(sd, sg);
5088
5089		/*
5090		 * Initialize sgp->power such that even if we mess up the
5091		 * domains and no possible iteration will get us here, we won't
5092		 * die on a /0 trap.
5093		 */
5094		sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
5095
5096		/*
5097		 * Make sure the first group of this domain contains the
5098		 * canonical balance cpu. Otherwise the sched_domain iteration
5099		 * breaks. See update_sg_lb_stats().
5100		 */
5101		if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
5102		    group_balance_cpu(sg) == cpu)
5103			groups = sg;
5104
5105		if (!first)
5106			first = sg;
5107		if (last)
5108			last->next = sg;
5109		last = sg;
5110		last->next = first;
5111	}
5112	sd->groups = groups;
5113
5114	return 0;
5115
5116fail:
5117	free_sched_groups(first, 0);
5118
5119	return -ENOMEM;
5120}
5121
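/*
 * Return the group number for @cpu (the first cpu of its child domain's
 * span, or @cpu itself at the lowest level) and, if @sg is given, the
 * matching sched_group from the per-cpu sd_data storage.
 */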
5122static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
5123{
5124	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
5125	struct sched_domain *child = sd->child;
5126
5127	if (child)
5128		cpu = cpumask_first(sched_domain_span(child));
5129
5130	if (sg) {
5131		*sg = *per_cpu_ptr(sdd->sg, cpu);
5132		(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
5133		atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
5134	}
5135
5136	return cpu;
5137}
5138
5139/*
5140 * build_sched_groups will build a circular linked list of the groups
5141 * covered by the given span, set each group's ->cpumask correctly, and
5142 * initialize each group's ->cpu_power to 0.
5143 *
5144 * Assumes the sched_domain tree is fully constructed
5145 */
5146static int
5147build_sched_groups(struct sched_domain *sd, int cpu)
5148{
5149	struct sched_group *first = NULL, *last = NULL;
5150	struct sd_data *sdd = sd->private;
5151	const struct cpumask *span = sched_domain_span(sd);
5152	struct cpumask *covered;
5153	int i;
5154
5155	get_group(cpu, sdd, &sd->groups);
5156	atomic_inc(&sd->groups->ref);
5157
5158	if (cpu != cpumask_first(span))
5159		return 0;
5160
5161	lockdep_assert_held(&sched_domains_mutex);
5162	covered = sched_domains_tmpmask;
5163
5164	cpumask_clear(covered);
5165
5166	for_each_cpu(i, span) {
5167		struct sched_group *sg;
5168		int group, j;
5169
5170		if (cpumask_test_cpu(i, covered))
5171			continue;
5172
5173		group = get_group(i, sdd, &sg);
5174		cpumask_clear(sched_group_cpus(sg));
5175		sg->sgp->power = 0;
5176		cpumask_setall(sched_group_mask(sg));
5177
5178		for_each_cpu(j, span) {
5179			if (get_group(j, sdd, NULL) != group)
5180				continue;
5181
5182			cpumask_set_cpu(j, covered);
5183			cpumask_set_cpu(j, sched_group_cpus(sg));
5184		}
5185
5186		if (!first)
5187			first = sg;
5188		if (last)
5189			last->next = sg;
5190		last = sg;
5191	}
5192	last->next = first;
5193
5194	return 0;
5195}
5196
5197/*
5198 * Initialize sched groups cpu_power.
5199 *
5200 * cpu_power indicates the capacity of a sched group, which is used while
5201 * distributing the load between different sched groups in a sched domain.
5202 * Typically cpu_power for all the groups in a sched domain will be the same
5203 * unless there are asymmetries in the topology. If there are asymmetries, the
5204 * group having more cpu_power will pick up more load compared to the group
5205 * having less cpu_power.
5206 */
5207static void init_sched_groups_power(int cpu, struct sched_domain *sd)
5208{
5209	struct sched_group *sg = sd->groups;
5210
5211	WARN_ON(!sg);
5212
5213	do {
5214		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
5215		sg = sg->next;
5216	} while (sg != sd->groups);
5217
5218	if (cpu != group_balance_cpu(sg))
5219		return;
5220
5221	update_group_power(sd, cpu);
5222	atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
5223}
5224
5225int __weak arch_sd_sibling_asym_packing(void)
5226{
5227       return 0*SD_ASYM_PACKING;
5228}
5229
5230/*
5231 * Initializers for schedule domains
5232 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
5233 */
5234
5235#ifdef CONFIG_SCHED_DEBUG
5236# define SD_INIT_NAME(sd, type)		sd->name = #type
5237#else
5238# define SD_INIT_NAME(sd, type)		do { } while (0)
5239#endif
5240
5241#define SD_INIT_FUNC(type)						\
5242static noinline struct sched_domain *					\
5243sd_init_##type(struct sched_domain_topology_level *tl, int cpu) 	\
5244{									\
5245	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);	\
5246	*sd = SD_##type##_INIT;						\
5247	SD_INIT_NAME(sd, type);						\
5248	sd->private = &tl->data;					\
5249	return sd;							\
5250}
5251
5252SD_INIT_FUNC(CPU)
5253#ifdef CONFIG_SCHED_SMT
5254 SD_INIT_FUNC(SIBLING)
5255#endif
5256#ifdef CONFIG_SCHED_MC
5257 SD_INIT_FUNC(MC)
5258#endif
5259#ifdef CONFIG_SCHED_BOOK
5260 SD_INIT_FUNC(BOOK)
5261#endif
5262
5263static int default_relax_domain_level = -1;
5264int sched_domain_level_max;
5265
5266static int __init setup_relax_domain_level(char *str)
5267{
5268	if (kstrtoint(str, 0, &default_relax_domain_level))
5269		pr_warn("Unable to set relax_domain_level\n");
5270
5271	return 1;
5272}
5273__setup("relax_domain_level=", setup_relax_domain_level);
5274
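/*
 * Apply the requested relax_domain_level: wake/newidle balancing is turned
 * off for domains above the requested level and turned on for the rest.
 */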
5275static void set_domain_attribute(struct sched_domain *sd,
5276				 struct sched_domain_attr *attr)
5277{
5278	int request;
5279
5280	if (!attr || attr->relax_domain_level < 0) {
5281		if (default_relax_domain_level < 0)
5282			return;
5283		else
5284			request = default_relax_domain_level;
5285	} else
5286		request = attr->relax_domain_level;
5287	if (request < sd->level) {
5288		/* turn off idle balance on this domain */
5289		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
5290	} else {
5291		/* turn on idle balance on this domain */
5292		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
5293	}
5294}
5295
5296static void __sdt_free(const struct cpumask *cpu_map);
5297static int __sdt_alloc(const struct cpumask *cpu_map);
5298
5299static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
5300				 const struct cpumask *cpu_map)
5301{
5302	switch (what) {
5303	case sa_rootdomain:
5304		if (!atomic_read(&d->rd->refcount))
5305			free_rootdomain(&d->rd->rcu); /* fall through */
5306	case sa_sd:
5307		free_percpu(d->sd); /* fall through */
5308	case sa_sd_storage:
5309		__sdt_free(cpu_map); /* fall through */
5310	case sa_none:
5311		break;
5312	}
5313}
5314
5315static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
5316						   const struct cpumask *cpu_map)
5317{
5318	memset(d, 0, sizeof(*d));
5319
5320	if (__sdt_alloc(cpu_map))
5321		return sa_sd_storage;
5322	d->sd = alloc_percpu(struct sched_domain *);
5323	if (!d->sd)
5324		return sa_sd_storage;
5325	d->rd = alloc_rootdomain();
5326	if (!d->rd)
5327		return sa_sd;
5328	return sa_rootdomain;
5329}
5330
5331/*
5332 * NULL the sd_data elements we've used to build the sched_domain and
5333 * sched_group structure so that the subsequent __free_domain_allocs()
5334 * will not free the data we're using.
5335 */
5336static void claim_allocations(int cpu, struct sched_domain *sd)
5337{
5338	struct sd_data *sdd = sd->private;
5339
5340	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
5341	*per_cpu_ptr(sdd->sd, cpu) = NULL;
5342
5343	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
5344		*per_cpu_ptr(sdd->sg, cpu) = NULL;
5345
5346	if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
5347		*per_cpu_ptr(sdd->sgp, cpu) = NULL;
5348}
5349
5350#ifdef CONFIG_SCHED_SMT
5351static const struct cpumask *cpu_smt_mask(int cpu)
5352{
5353	return topology_thread_cpumask(cpu);
5354}
5355#endif
5356
5357/*
5358 * Topology list, bottom-up.
5359 */
5360static struct sched_domain_topology_level default_topology[] = {
5361#ifdef CONFIG_SCHED_SMT
5362	{ sd_init_SIBLING, cpu_smt_mask, },
5363#endif
5364#ifdef CONFIG_SCHED_MC
5365	{ sd_init_MC, cpu_coregroup_mask, },
5366#endif
5367#ifdef CONFIG_SCHED_BOOK
5368	{ sd_init_BOOK, cpu_book_mask, },
5369#endif
5370	{ sd_init_CPU, cpu_cpu_mask, },
5371	{ NULL, },
5372};
5373
5374static struct sched_domain_topology_level *sched_domain_topology = default_topology;
5375
5376#define for_each_sd_topology(tl)			\
5377	for (tl = sched_domain_topology; tl->init; tl++)
5378
5379#ifdef CONFIG_NUMA
5380
5381static int sched_domains_numa_levels;
5382static int *sched_domains_numa_distance;
5383static struct cpumask ***sched_domains_numa_masks;
5384static int sched_domains_curr_level;
5385
5386static inline int sd_local_flags(int level)
5387{
5388	if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE)
5389		return 0;
5390
5391	return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
5392}
5393
5394static struct sched_domain *
5395sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
5396{
5397	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
5398	int level = tl->numa_level;
5399	int sd_weight = cpumask_weight(
5400			sched_domains_numa_masks[level][cpu_to_node(cpu)]);
5401
5402	*sd = (struct sched_domain){
5403		.min_interval		= sd_weight,
5404		.max_interval		= 2*sd_weight,
5405		.busy_factor		= 32,
5406		.imbalance_pct		= 125,
5407		.cache_nice_tries	= 2,
5408		.busy_idx		= 3,
5409		.idle_idx		= 2,
5410		.newidle_idx		= 0,
5411		.wake_idx		= 0,
5412		.forkexec_idx		= 0,
5413
5414		.flags			= 1*SD_LOAD_BALANCE
5415					| 1*SD_BALANCE_NEWIDLE
5416					| 0*SD_BALANCE_EXEC
5417					| 0*SD_BALANCE_FORK
5418					| 0*SD_BALANCE_WAKE
5419					| 0*SD_WAKE_AFFINE
5420					| 0*SD_SHARE_CPUPOWER
5421					| 0*SD_SHARE_PKG_RESOURCES
5422					| 1*SD_SERIALIZE
5423					| 0*SD_PREFER_SIBLING
5424					| 1*SD_NUMA
5425					| sd_local_flags(level)
5426					,
5427		.last_balance		= jiffies,
5428		.balance_interval	= sd_weight,
5429	};
5430	SD_INIT_NAME(sd, NUMA);
5431	sd->private = &tl->data;
5432
5433	/*
5434	 * Ugly hack to pass state to sd_numa_mask()...
5435	 */
5436	sched_domains_curr_level = tl->numa_level;
5437
5438	return sd;
5439}
5440
5441static const struct cpumask *sd_numa_mask(int cpu)
5442{
5443	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
5444}
5445
5446static void sched_numa_warn(const char *str)
5447{
5448	static int done = false;
5449	int i,j;
5450
5451	if (done)
5452		return;
5453
5454	done = true;
5455
5456	printk(KERN_WARNING "ERROR: %s\n\n", str);
5457
5458	for (i = 0; i < nr_node_ids; i++) {
5459		printk(KERN_WARNING "  ");
5460		for (j = 0; j < nr_node_ids; j++)
5461			printk(KERN_CONT "%02d ", node_distance(i,j));
5462		printk(KERN_CONT "\n");
5463	}
5464	printk(KERN_WARNING "\n");
5465}
5466
5467static bool find_numa_distance(int distance)
5468{
5469	int i;
5470
5471	if (distance == node_distance(0, 0))
5472		return true;
5473
5474	for (i = 0; i < sched_domains_numa_levels; i++) {
5475		if (sched_domains_numa_distance[i] == distance)
5476			return true;
5477	}
5478
5479	return false;
5480}
5481
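/*
 * Discover the distinct NUMA distances present in the system, build a
 * per-distance cpumask for every node and extend the default topology
 * with one (overlapping) level per distance.
 */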
5482static void sched_init_numa(void)
5483{
5484	int next_distance, curr_distance = node_distance(0, 0);
5485	struct sched_domain_topology_level *tl;
5486	int level = 0;
5487	int i, j, k;
5488
5489	sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
5490	if (!sched_domains_numa_distance)
5491		return;
5492
5493	/*
5494	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
5495	 * unique distances in the node_distance() table.
5496	 *
5497	 * Assumes node_distance(0,j) includes all distances in
5498	 * node_distance(i,j) in order to avoid cubic time.
5499	 */
5500	next_distance = curr_distance;
5501	for (i = 0; i < nr_node_ids; i++) {
5502		for (j = 0; j < nr_node_ids; j++) {
5503			for (k = 0; k < nr_node_ids; k++) {
5504				int distance = node_distance(i, k);
5505
5506				if (distance > curr_distance &&
5507				    (distance < next_distance ||
5508				     next_distance == curr_distance))
5509					next_distance = distance;
5510
5511				/*
5512				 * While not a strong assumption, it would be nice to know
5513				 * about cases where node A is connected to B but B is not
5514				 * equally connected to A.
5515				 */
5516				if (sched_debug() && node_distance(k, i) != distance)
5517					sched_numa_warn("Node-distance not symmetric");
5518
5519				if (sched_debug() && i && !find_numa_distance(distance))
5520					sched_numa_warn("Node-0 not representative");
5521			}
5522			if (next_distance != curr_distance) {
5523				sched_domains_numa_distance[level++] = next_distance;
5524				sched_domains_numa_levels = level;
5525				curr_distance = next_distance;
5526			} else break;
5527		}
5528
5529		/*
5530		 * In case of sched_debug() we verify the above assumption.
5531		 */
5532		if (!sched_debug())
5533			break;
5534	}
5535	/*
5536	 * 'level' contains the number of unique distances, excluding the
5537	 * identity distance node_distance(i,i).
5538	 *
5539	 * The sched_domains_numa_distance[] array includes the actual distance
5540	 * numbers.
5541	 */
5542
5543	/*
5544	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
5545	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
5546	 * the array will contain fewer than 'level' members. This could be
5547	 * dangerous when we use it to iterate the array sched_domains_numa_masks[][]
5548	 * in other functions.
5549	 *
5550	 * We reset it to 'level' at the end of this function.
5551	 */
5552	sched_domains_numa_levels = 0;
5553
5554	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
5555	if (!sched_domains_numa_masks)
5556		return;
5557
5558	/*
5559	 * Now for each level, construct a mask per node which contains all
5560	 * cpus of nodes that are that many hops away from us.
5561	 */
5562	for (i = 0; i < level; i++) {
5563		sched_domains_numa_masks[i] =
5564			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
5565		if (!sched_domains_numa_masks[i])
5566			return;
5567
5568		for (j = 0; j < nr_node_ids; j++) {
5569			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
5570			if (!mask)
5571				return;
5572
5573			sched_domains_numa_masks[i][j] = mask;
5574
5575			for (k = 0; k < nr_node_ids; k++) {
5576				if (node_distance(j, k) > sched_domains_numa_distance[i])
5577					continue;
5578
5579				cpumask_or(mask, mask, cpumask_of_node(k));
5580			}
5581		}
5582	}
5583
5584	tl = kzalloc((ARRAY_SIZE(default_topology) + level) *
5585			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
5586	if (!tl)
5587		return;
5588
5589	/*
5590	 * Copy the default topology bits..
5591	 */
5592	for (i = 0; default_topology[i].init; i++)
5593		tl[i] = default_topology[i];
5594
5595	/*
5596	 * .. and append 'j' levels of NUMA goodness.
5597	 */
5598	for (j = 0; j < level; i++, j++) {
5599		tl[i] = (struct sched_domain_topology_level){
5600			.init = sd_numa_init,
5601			.mask = sd_numa_mask,
5602			.flags = SDTL_OVERLAP,
5603			.numa_level = j,
5604		};
5605	}
5606
5607	sched_domain_topology = tl;
5608
5609	sched_domains_numa_levels = level;
5610}
5611
5612static void sched_domains_numa_masks_set(int cpu)
5613{
5614	int i, j;
5615	int node = cpu_to_node(cpu);
5616
5617	for (i = 0; i < sched_domains_numa_levels; i++) {
5618		for (j = 0; j < nr_node_ids; j++) {
5619			if (node_distance(j, node) <= sched_domains_numa_distance[i])
5620				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
5621		}
5622	}
5623}
5624
5625static void sched_domains_numa_masks_clear(int cpu)
5626{
5627	int i, j;
5628	for (i = 0; i < sched_domains_numa_levels; i++) {
5629		for (j = 0; j < nr_node_ids; j++)
5630			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
5631	}
5632}
5633
5634/*
5635 * Update sched_domains_numa_masks[level][node] array when new cpus
5636 * are onlined.
5637 */
5638static int sched_domains_numa_masks_update(struct notifier_block *nfb,
5639					   unsigned long action,
5640					   void *hcpu)
5641{
5642	int cpu = (long)hcpu;
5643
5644	switch (action & ~CPU_TASKS_FROZEN) {
5645	case CPU_ONLINE:
5646		sched_domains_numa_masks_set(cpu);
5647		break;
5648
5649	case CPU_DEAD:
5650		sched_domains_numa_masks_clear(cpu);
5651		break;
5652
5653	default:
5654		return NOTIFY_DONE;
5655	}
5656
5657	return NOTIFY_OK;
5658}
5659#else
5660static inline void sched_init_numa(void)
5661{
5662}
5663
5664static int sched_domains_numa_masks_update(struct notifier_block *nfb,
5665					   unsigned long action,
5666					   void *hcpu)
5667{
5668	return 0;
5669}
5670#endif /* CONFIG_NUMA */
5671
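/*
 * Allocate per-cpu sched_domain, sched_group and sched_group_power storage
 * for every topology level covered by @cpu_map; undone by __sdt_free().
 */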
5672static int __sdt_alloc(const struct cpumask *cpu_map)
5673{
5674	struct sched_domain_topology_level *tl;
5675	int j;
5676
5677	for_each_sd_topology(tl) {
5678		struct sd_data *sdd = &tl->data;
5679
5680		sdd->sd = alloc_percpu(struct sched_domain *);
5681		if (!sdd->sd)
5682			return -ENOMEM;
5683
5684		sdd->sg = alloc_percpu(struct sched_group *);
5685		if (!sdd->sg)
5686			return -ENOMEM;
5687
5688		sdd->sgp = alloc_percpu(struct sched_group_power *);
5689		if (!sdd->sgp)
5690			return -ENOMEM;
5691
5692		for_each_cpu(j, cpu_map) {
5693			struct sched_domain *sd;
5694			struct sched_group *sg;
5695			struct sched_group_power *sgp;
5696
5697			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
5698					GFP_KERNEL, cpu_to_node(j));
5699			if (!sd)
5700				return -ENOMEM;
5701
5702			*per_cpu_ptr(sdd->sd, j) = sd;
5703
5704			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
5705					GFP_KERNEL, cpu_to_node(j));
5706			if (!sg)
5707				return -ENOMEM;
5708
5709			sg->next = sg;
5710
5711			*per_cpu_ptr(sdd->sg, j) = sg;
5712
5713			sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
5714					GFP_KERNEL, cpu_to_node(j));
5715			if (!sgp)
5716				return -ENOMEM;
5717
5718			*per_cpu_ptr(sdd->sgp, j) = sgp;
5719		}
5720	}
5721
5722	return 0;
5723}
5724
5725static void __sdt_free(const struct cpumask *cpu_map)
5726{
5727	struct sched_domain_topology_level *tl;
5728	int j;
5729
5730	for_each_sd_topology(tl) {
5731		struct sd_data *sdd = &tl->data;
5732
5733		for_each_cpu(j, cpu_map) {
5734			struct sched_domain *sd;
5735
5736			if (sdd->sd) {
5737				sd = *per_cpu_ptr(sdd->sd, j);
5738				if (sd && (sd->flags & SD_OVERLAP))
5739					free_sched_groups(sd->groups, 0);
5740				kfree(*per_cpu_ptr(sdd->sd, j));
5741			}
5742
5743			if (sdd->sg)
5744				kfree(*per_cpu_ptr(sdd->sg, j));
5745			if (sdd->sgp)
5746				kfree(*per_cpu_ptr(sdd->sgp, j));
5747		}
5748		free_percpu(sdd->sd);
5749		sdd->sd = NULL;
5750		free_percpu(sdd->sg);
5751		sdd->sg = NULL;
5752		free_percpu(sdd->sgp);
5753		sdd->sgp = NULL;
5754	}
5755}
5756
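/*
 * Initialize the sched_domain of topology level @tl for @cpu and link it
 * above @child in the domain hierarchy.
 */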
5757struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
5758		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
5759		struct sched_domain *child, int cpu)
5760{
5761	struct sched_domain *sd = tl->init(tl, cpu);
5762	if (!sd)
5763		return child;
5764
5765	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
5766	if (child) {
5767		sd->level = child->level + 1;
5768		sched_domain_level_max = max(sched_domain_level_max, sd->level);
5769		child->parent = sd;
5770		sd->child = child;
5771	}
5772	set_domain_attribute(sd, attr);
5773
5774	return sd;
5775}
5776
5777/*
5778 * Build sched domains for a given set of cpus and attach the sched domains
5779 * to the individual cpus
5780 */
5781static int build_sched_domains(const struct cpumask *cpu_map,
5782			       struct sched_domain_attr *attr)
5783{
5784	enum s_alloc alloc_state;
5785	struct sched_domain *sd;
5786	struct s_data d;
5787	int i, ret = -ENOMEM;
5788
5789	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
5790	if (alloc_state != sa_rootdomain)
5791		goto error;
5792
5793	/* Set up domains for cpus specified by the cpu_map. */
5794	for_each_cpu(i, cpu_map) {
5795		struct sched_domain_topology_level *tl;
5796
5797		sd = NULL;
5798		for_each_sd_topology(tl) {
5799			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
5800			if (tl == sched_domain_topology)
5801				*per_cpu_ptr(d.sd, i) = sd;
5802			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
5803				sd->flags |= SD_OVERLAP;
5804			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
5805				break;
5806		}
5807	}
5808
5809	/* Build the groups for the domains */
5810	for_each_cpu(i, cpu_map) {
5811		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
5812			sd->span_weight = cpumask_weight(sched_domain_span(sd));
5813			if (sd->flags & SD_OVERLAP) {
5814				if (build_overlap_sched_groups(sd, i))
5815					goto error;
5816			} else {
5817				if (build_sched_groups(sd, i))
5818					goto error;
5819			}
5820		}
5821	}
5822
5823	/* Calculate CPU power for physical packages and nodes */
5824	for (i = nr_cpumask_bits-1; i >= 0; i--) {
5825		if (!cpumask_test_cpu(i, cpu_map))
5826			continue;
5827
5828		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
5829			claim_allocations(i, sd);
5830			init_sched_groups_power(i, sd);
5831		}
5832	}
5833
5834	/* Attach the domains */
5835	rcu_read_lock();
5836	for_each_cpu(i, cpu_map) {
5837		sd = *per_cpu_ptr(d.sd, i);
5838		cpu_attach_domain(sd, d.rd, i);
5839	}
5840	rcu_read_unlock();
5841
5842	ret = 0;
5843error:
5844	__free_domain_allocs(&d, alloc_state, cpu_map);
5845	return ret;
5846}
5847
5848static cpumask_var_t *doms_cur;	/* current sched domains */
5849static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
5850static struct sched_domain_attr *dattr_cur;
5851				/* attributes of custom domains in 'doms_cur' */
5852
5853/*
5854 * Special case: If a kmalloc of a doms_cur partition (array of
5855 * cpumask) fails, then fall back to a single sched domain,
5856 * as determined by the single cpumask fallback_doms.
5857 */
5858static cpumask_var_t fallback_doms;
5859
5860/*
5861 * arch_update_cpu_topology lets virtualized architectures update the
5862 * cpu core maps. It is supposed to return 1 if the topology changed
5863 * or 0 if it stayed the same.
5864 */
5865int __attribute__((weak)) arch_update_cpu_topology(void)
5866{
5867	return 0;
5868}
5869
5870cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
5871{
5872	int i;
5873	cpumask_var_t *doms;
5874
5875	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
5876	if (!doms)
5877		return NULL;
5878	for (i = 0; i < ndoms; i++) {
5879		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
5880			free_sched_domains(doms, i);
5881			return NULL;
5882		}
5883	}
5884	return doms;
5885}
5886
5887void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
5888{
5889	unsigned int i;
5890	for (i = 0; i < ndoms; i++)
5891		free_cpumask_var(doms[i]);
5892	kfree(doms);
5893}
5894
5895/*
5896 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
5897 * For now this just excludes isolated cpus, but could be used to
5898 * exclude other special cases in the future.
5899 */
5900static int init_sched_domains(const struct cpumask *cpu_map)
5901{
5902	int err;
5903
5904	arch_update_cpu_topology();
5905	ndoms_cur = 1;
5906	doms_cur = alloc_sched_domains(ndoms_cur);
5907	if (!doms_cur)
5908		doms_cur = &fallback_doms;
5909	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
5910	err = build_sched_domains(doms_cur[0], NULL);
5911	register_sched_domain_sysctl();
5912
5913	return err;
5914}
5915
5916/*
5917 * Detach sched domains from a group of cpus specified in cpu_map.
5918 * These cpus will now be attached to the NULL domain
5919 */
5920static void detach_destroy_domains(const struct cpumask *cpu_map)
5921{
5922	int i;
5923
5924	rcu_read_lock();
5925	for_each_cpu(i, cpu_map)
5926		cpu_attach_domain(NULL, &def_root_domain, i);
5927	rcu_read_unlock();
5928}
5929
5930/* handle null as "default" */
5931static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
5932			struct sched_domain_attr *new, int idx_new)
5933{
5934	struct sched_domain_attr tmp;
5935
5936	/* fast path */
5937	if (!new && !cur)
5938		return 1;
5939
5940	tmp = SD_ATTR_INIT;
5941	return !memcmp(cur ? (cur + idx_cur) : &tmp,
5942			new ? (new + idx_new) : &tmp,
5943			sizeof(struct sched_domain_attr));
5944}
5945
5946/*
5947 * Partition sched domains as specified by the 'ndoms_new'
5948 * cpumasks in the array doms_new[] of cpumasks. This compares
5949 * doms_new[] to the current sched domain partitioning, doms_cur[].
5950 * It destroys each deleted domain and builds each new domain.
5951 *
5952 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
5953 * The masks don't intersect (don't overlap). We should set up one
5954 * sched domain for each mask. CPUs not in any of the cpumasks will
5955 * not be load balanced. If the same cpumask appears both in the
5956 * current 'doms_cur' domains and in the new 'doms_new', we can leave
5957 * it as it is.
5958 *
5959 * The passed in 'doms_new' should be allocated using
5960 * alloc_sched_domains.  This routine takes ownership of it and will
5961 * free_sched_domains it when done with it. If the caller failed the
5962 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
5963 * and partition_sched_domains() will fall back to the single partition
5964 * 'fallback_doms'; this also forces the domains to be rebuilt.
5965 *
5966 * If doms_new == NULL it will be replaced with cpu_online_mask.
5967 * ndoms_new == 0 is a special case for destroying existing domains,
5968 * and it will not create the default domain.
5969 *
5970 * Call with hotplug lock held
5971 */
5972void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
5973			     struct sched_domain_attr *dattr_new)
5974{
5975	int i, j, n;
5976	int new_topology;
5977
5978	mutex_lock(&sched_domains_mutex);
5979
5980	/* always unregister in case we don't destroy any domains */
5981	unregister_sched_domain_sysctl();
5982
5983	/* Let architecture update cpu core mappings. */
5984	new_topology = arch_update_cpu_topology();
5985
5986	n = doms_new ? ndoms_new : 0;
5987
5988	/* Destroy deleted domains */
5989	for (i = 0; i < ndoms_cur; i++) {
5990		for (j = 0; j < n && !new_topology; j++) {
5991			if (cpumask_equal(doms_cur[i], doms_new[j])
5992			    && dattrs_equal(dattr_cur, i, dattr_new, j))
5993				goto match1;
5994		}
5995		/* no match - a current sched domain not in new doms_new[] */
5996		detach_destroy_domains(doms_cur[i]);
5997match1:
5998		;
5999	}
6000
6001	n = ndoms_cur;
6002	if (doms_new == NULL) {
6003		n = 0;
6004		doms_new = &fallback_doms;
6005		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
6006		WARN_ON_ONCE(dattr_new);
6007	}
6008
6009	/* Build new domains */
6010	for (i = 0; i < ndoms_new; i++) {
6011		for (j = 0; j < n && !new_topology; j++) {
6012			if (cpumask_equal(doms_new[i], doms_cur[j])
6013			    && dattrs_equal(dattr_new, i, dattr_cur, j))
6014				goto match2;
6015		}
6016		/* no match - add a new doms_new */
6017		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
6018match2:
6019		;
6020	}
6021
6022	/* Remember the new sched domains */
6023	if (doms_cur != &fallback_doms)
6024		free_sched_domains(doms_cur, ndoms_cur);
6025	kfree(dattr_cur);	/* kfree(NULL) is safe */
6026	doms_cur = doms_new;
6027	dattr_cur = dattr_new;
6028	ndoms_cur = ndoms_new;
6029
6030	register_sched_domain_sysctl();
6031
6032	mutex_unlock(&sched_domains_mutex);
6033}
6034
6035static int num_cpus_frozen;	/* used to mark begin/end of suspend/resume */
6036
6037/*
6038 * Update cpusets according to cpu_active mask.  If cpusets are
6039 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
6040 * around partition_sched_domains().
6041 *
6042 * If we come here as part of a suspend/resume, don't touch cpusets because we
6043 * want to restore them to their original state upon resume anyway.
6044 */
6045static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
6046			     void *hcpu)
6047{
6048	switch (action) {
6049	case CPU_ONLINE_FROZEN:
6050	case CPU_DOWN_FAILED_FROZEN:
6051
6052		/*
6053		 * num_cpus_frozen tracks how many CPUs are involved in suspend
6054		 * resume sequence. As long as this is not the last online
6055		 * operation in the resume sequence, just build a single sched
6056		 * domain, ignoring cpusets.
6057		 */
6058		num_cpus_frozen--;
6059		if (likely(num_cpus_frozen)) {
6060			partition_sched_domains(1, NULL, NULL);
6061			break;
6062		}
6063
6064		/*
6065		 * This is the last CPU online operation. So fall through and
6066		 * restore the original sched domains by considering the
6067		 * cpuset configurations.
6068		 */
6069
6070	case CPU_ONLINE:
6071	case CPU_DOWN_FAILED:
6072		cpuset_update_active_cpus(true);
6073		break;
6074	default:
6075		return NOTIFY_DONE;
6076	}
6077	return NOTIFY_OK;
6078}
6079
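/*
 * Counterpart of cpuset_cpu_active(): on an ordinary CPU_DOWN_PREPARE let
 * cpusets rebuild the sched domains; during suspend just count the frozen
 * CPU and collapse everything into a single domain.
 */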
6080static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
6081			       void *hcpu)
6082{
6083	switch (action) {
6084	case CPU_DOWN_PREPARE:
6085		cpuset_update_active_cpus(false);
6086		break;
6087	case CPU_DOWN_PREPARE_FROZEN:
6088		num_cpus_frozen++;
6089		partition_sched_domains(1, NULL, NULL);
6090		break;
6091	default:
6092		return NOTIFY_DONE;
6093	}
6094	return NOTIFY_OK;
6095}
6096
6097void __init sched_init_smp(void)
6098{
6099	cpumask_var_t non_isolated_cpus;
6100
6101	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
6102	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
6103
6104	sched_init_numa();
6105
6106	/*
6107	 * There's no userspace yet to cause hotplug operations; hence all the
6108	 * cpu masks are stable and all blatant races in the below code cannot
6109	 * happen.
6110	 */
6111	mutex_lock(&sched_domains_mutex);
6112	init_sched_domains(cpu_active_mask);
6113	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
6114	if (cpumask_empty(non_isolated_cpus))
6115		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
6116	mutex_unlock(&sched_domains_mutex);
6117
6118	hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
6119	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
6120	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
6121
6122	init_hrtick();
6123
6124	/* Move init over to a non-isolated CPU */
6125	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
6126		BUG();
6127	sched_init_granularity();
6128	free_cpumask_var(non_isolated_cpus);
6129
6130	init_sched_rt_class();
6131}
6132#else
6133void __init sched_init_smp(void)
6134{
6135	sched_init_granularity();
6136}
6137#endif /* CONFIG_SMP */
6138
6139const_debug unsigned int sysctl_timer_migration = 1;
6140
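/*
 * Report whether @addr lies in the scheduler's dedicated text section (or in
 * the lock functions); used e.g. by the get_wchan() implementations to skip
 * scheduler frames when reporting where a blocked task is sleeping.
 */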
6141int in_sched_functions(unsigned long addr)
6142{
6143	return in_lock_functions(addr) ||
6144		(addr >= (unsigned long)__sched_text_start
6145		&& addr < (unsigned long)__sched_text_end);
6146}
6147
6148#ifdef CONFIG_CGROUP_SCHED
6149/*
6150 * Default task group.
6151 * Every task in the system belongs to this group at bootup.
6152 */
6153struct task_group root_task_group;
6154LIST_HEAD(task_groups);
6155#endif
6156
6157DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
6158
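/*
 * Boot-time scheduler initialization: carve out the group-scheduling pointer
 * arrays, set up default RT bandwidth, initialize every per-CPU runqueue and
 * finally turn the booting thread into this CPU's idle task.
 */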
6159void __init sched_init(void)
6160{
6161	int i, j;
6162	unsigned long alloc_size = 0, ptr;
6163
6164#ifdef CONFIG_FAIR_GROUP_SCHED
6165	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6166#endif
6167#ifdef CONFIG_RT_GROUP_SCHED
6168	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6169#endif
6170#ifdef CONFIG_CPUMASK_OFFSTACK
6171	alloc_size += num_possible_cpus() * cpumask_size();
6172#endif
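	/*
	 * A single bootstrap allocation is carved up below: the root task
	 * group's per-CPU entity/runqueue pointer arrays, plus the off-stack
	 * load_balance_mask cpumasks when CONFIG_CPUMASK_OFFSTACK is set.
	 */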
6173	if (alloc_size) {
6174		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
6175
6176#ifdef CONFIG_FAIR_GROUP_SCHED
6177		root_task_group.se = (struct sched_entity **)ptr;
6178		ptr += nr_cpu_ids * sizeof(void **);
6179
6180		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
6181		ptr += nr_cpu_ids * sizeof(void **);
6182
6183#endif /* CONFIG_FAIR_GROUP_SCHED */
6184#ifdef CONFIG_RT_GROUP_SCHED
6185		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
6186		ptr += nr_cpu_ids * sizeof(void **);
6187
6188		root_task_group.rt_rq = (struct rt_rq **)ptr;
6189		ptr += nr_cpu_ids * sizeof(void **);
6190
6191#endif /* CONFIG_RT_GROUP_SCHED */
6192#ifdef CONFIG_CPUMASK_OFFSTACK
6193		for_each_possible_cpu(i) {
6194			per_cpu(load_balance_mask, i) = (void *)ptr;
6195			ptr += cpumask_size();
6196		}
6197#endif /* CONFIG_CPUMASK_OFFSTACK */
6198	}
6199
6200#ifdef CONFIG_SMP
6201	init_defrootdomain();
6202#endif
6203
6204	init_rt_bandwidth(&def_rt_bandwidth,
6205			global_rt_period(), global_rt_runtime());
6206
6207#ifdef CONFIG_RT_GROUP_SCHED
6208	init_rt_bandwidth(&root_task_group.rt_bandwidth,
6209			global_rt_period(), global_rt_runtime());
6210#endif /* CONFIG_RT_GROUP_SCHED */
6211
6212#ifdef CONFIG_CGROUP_SCHED
6213	list_add(&root_task_group.list, &task_groups);
6214	INIT_LIST_HEAD(&root_task_group.children);
6215	INIT_LIST_HEAD(&root_task_group.siblings);
6216	autogroup_init(&init_task);
6217
6218#endif /* CONFIG_CGROUP_SCHED */
6219
6220	for_each_possible_cpu(i) {
6221		struct rq *rq;
6222
6223		rq = cpu_rq(i);
6224		raw_spin_lock_init(&rq->lock);
6225		rq->nr_running = 0;
6226		rq->calc_load_active = 0;
6227		rq->calc_load_update = jiffies + LOAD_FREQ;
6228		init_cfs_rq(&rq->cfs);
6229		init_rt_rq(&rq->rt, rq);
6230#ifdef CONFIG_FAIR_GROUP_SCHED
6231		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
6232		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
6233		/*
6234		 * How much cpu bandwidth does root_task_group get?
6235		 *
6236		 * In case of task-groups formed through the cgroup filesystem, it
6237		 * gets 100% of the cpu resources in the system. This overall
6238		 * system cpu resource is divided among the tasks of
6239		 * root_task_group and its child task-groups in a fair manner,
6240		 * based on each entity's (task or task-group's) weight
6241		 * (se->load.weight).
6242		 *
6243		 * In other words, if root_task_group has 10 tasks of weight
6244		 * 1024 and two child groups A0 and A1 (of weight 1024 each),
6245		 * then A0's share of the cpu resource is:
6246		 *
6247		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
6248		 *
6249		 * We achieve this by letting root_task_group's tasks sit
6250		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
6251		 */
6252		init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
6253		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
6254#endif /* CONFIG_FAIR_GROUP_SCHED */
6255
6256		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
6257#ifdef CONFIG_RT_GROUP_SCHED
6258		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
6259		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
6260#endif
6261
6262		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
6263			rq->cpu_load[j] = 0;
6264
6265		rq->last_load_update_tick = jiffies;
6266
6267#ifdef CONFIG_SMP
6268		rq->sd = NULL;
6269		rq->rd = NULL;
6270		rq->cpu_power = SCHED_POWER_SCALE;
6271		rq->post_schedule = 0;
6272		rq->active_balance = 0;
6273		rq->next_balance = jiffies;
6274		rq->push_cpu = 0;
6275		rq->cpu = i;
6276		rq->online = 0;
6277		rq->idle_stamp = 0;
6278		rq->avg_idle = 2*sysctl_sched_migration_cost;
6279		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
6280
6281		INIT_LIST_HEAD(&rq->cfs_tasks);
6282
6283		rq_attach_root(rq, &def_root_domain);
6284#ifdef CONFIG_NO_HZ_COMMON
6285		rq->nohz_flags = 0;
6286#endif
6287#ifdef CONFIG_NO_HZ_FULL
6288		rq->last_sched_tick = 0;
6289#endif
6290#endif
6291		init_rq_hrtick(rq);
6292		atomic_set(&rq->nr_iowait, 0);
6293	}
6294
6295	set_load_weight(&init_task);
6296
6297#ifdef CONFIG_PREEMPT_NOTIFIERS
6298	INIT_HLIST_HEAD(&init_task.preempt_notifiers);
6299#endif
6300
6301#ifdef CONFIG_RT_MUTEXES
6302	plist_head_init(&init_task.pi_waiters);
6303#endif
6304
6305	/*
6306	 * The boot idle thread does lazy MMU switching as well:
6307	 */
6308	atomic_inc(&init_mm.mm_count);
6309	enter_lazy_tlb(&init_mm, current);
6310
6311	/*
6312	 * Make us the idle thread. Technically, schedule() should not be
6313	 * called from this thread; however, it might be somewhere below,
6314	 * and because we are the idle thread, we just pick up running again
6315	 * when this runqueue becomes "idle".
6316	 */
6317	init_idle(current, smp_processor_id());
6318
6319	calc_load_update = jiffies + LOAD_FREQ;
6320
6321	/*
6322	 * During early bootup we pretend to be a normal task:
6323	 */
6324	current->sched_class = &fair_sched_class;
6325
6326#ifdef CONFIG_SMP
6327	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
6328	/* May be allocated at isolcpus cmdline parse time */
6329	if (cpu_isolated_map == NULL)
6330		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
6331	idle_thread_set_boot_cpu();
6332#endif
6333	init_sched_fair_class();
6334
6335	scheduler_running = 1;
6336}
6337
6338#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
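/*
 * preempt_count_equals() folds the RCU read-side nesting depth into the
 * preempt count, so __might_sleep() treats "inside rcu_read_lock()" just
 * like "preemption disabled" when deciding whether sleeping is allowed.
 */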
6339static inline int preempt_count_equals(int preempt_offset)
6340{
6341	int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
6342
6343	return (nested == preempt_offset);
6344}
6345
6346void __might_sleep(const char *file, int line, int preempt_offset)
6347{
6348	static unsigned long prev_jiffy;	/* ratelimiting */
6349
6350	rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
6351	if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
6352	    system_state != SYSTEM_RUNNING || oops_in_progress)
6353		return;
6354	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
6355		return;
6356	prev_jiffy = jiffies;
6357
6358	printk(KERN_ERR
6359		"BUG: sleeping function called from invalid context at %s:%d\n",
6360			file, line);
6361	printk(KERN_ERR
6362		"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
6363			in_atomic(), irqs_disabled(),
6364			current->pid, current->comm);
6365
6366	debug_show_held_locks(current);
6367	if (irqs_disabled())
6368		print_irqtrace_events(current);
6369	dump_stack();
6370}
6371EXPORT_SYMBOL(__might_sleep);
6372#endif
6373
6374#ifdef CONFIG_MAGIC_SYSRQ
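/*
 * Reset one task to SCHED_NORMAL using the usual change pattern: dequeue it
 * if queued, rewrite policy and priority, requeue it, and kick the current
 * task so the change takes effect.
 */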
6375static void normalize_task(struct rq *rq, struct task_struct *p)
6376{
6377	const struct sched_class *prev_class = p->sched_class;
6378	int old_prio = p->prio;
6379	int on_rq;
6380
6381	on_rq = p->on_rq;
6382	if (on_rq)
6383		dequeue_task(rq, p, 0);
6384	__setscheduler(rq, p, SCHED_NORMAL, 0);
6385	if (on_rq) {
6386		enqueue_task(rq, p, 0);
6387		resched_task(rq->curr);
6388	}
6389
6390	check_class_changed(rq, p, prev_class, old_prio);
6391}
6392
6393void normalize_rt_tasks(void)
6394{
6395	struct task_struct *g, *p;
6396	unsigned long flags;
6397	struct rq *rq;
6398
6399	read_lock_irqsave(&tasklist_lock, flags);
6400	do_each_thread(g, p) {
6401		/*
6402		 * Only normalize user tasks:
6403		 */
6404		if (!p->mm)
6405			continue;
6406
6407		p->se.exec_start		= 0;
6408#ifdef CONFIG_SCHEDSTATS
6409		p->se.statistics.wait_start	= 0;
6410		p->se.statistics.sleep_start	= 0;
6411		p->se.statistics.block_start	= 0;
6412#endif
6413
6414		if (!rt_task(p)) {
6415			/*
6416			 * Renice userspace tasks with a negative
6417			 * nice level back to 0:
6418			 */
6419			if (TASK_NICE(p) < 0 && p->mm)
6420				set_user_nice(p, 0);
6421			continue;
6422		}
6423
6424		raw_spin_lock(&p->pi_lock);
6425		rq = __task_rq_lock(p);
6426
6427		normalize_task(rq, p);
6428
6429		__task_rq_unlock(rq);
6430		raw_spin_unlock(&p->pi_lock);
6431	} while_each_thread(g, p);
6432
6433	read_unlock_irqrestore(&tasklist_lock, flags);
6434}
6435
6436#endif /* CONFIG_MAGIC_SYSRQ */
6437
6438#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
6439/*
6440 * These functions are only useful for the IA64 MCA handling, or kdb.
6441 *
6442 * They can only be called when the whole system has been
6443 * stopped - every CPU needs to be quiescent, and no scheduling
6444 * activity can take place. Using them for anything else would
6445 * be a serious bug, and as a result, they aren't even visible
6446 * under any other configuration.
6447 */
6448
6449/**
6450 * curr_task - return the current task for a given cpu.
6451 * @cpu: the processor in question.
6452 *
6453 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6454 *
6455 * Return: The current task for @cpu.
6456 */
6457struct task_struct *curr_task(int cpu)
6458{
6459	return cpu_curr(cpu);
6460}
6461
6462#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
6463
6464#ifdef CONFIG_IA64
6465/**
6466 * set_curr_task - set the current task for a given cpu.
6467 * @cpu: the processor in question.
6468 * @p: the task pointer to set.
6469 *
6470 * Description: This function must only be used when non-maskable interrupts
6471 * are serviced on a separate stack. It allows the architecture to switch the
6472 * notion of the current task on a cpu in a non-blocking manner. This function
6473 * must be called with all CPUs synchronized and interrupts disabled, and
6474 * the caller must save the original value of the current task (see
6475 * curr_task() above) and restore that value before re-enabling interrupts
6476 * and restarting the system.
6477 *
6478 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6479 */
6480void set_curr_task(int cpu, struct task_struct *p)
6481{
6482	cpu_curr(cpu) = p;
6483}
6484
6485#endif
6486
6487#ifdef CONFIG_CGROUP_SCHED
6488/* task_group_lock serializes the addition/removal of task groups */
6489static DEFINE_SPINLOCK(task_group_lock);
6490
6491static void free_sched_group(struct task_group *tg)
6492{
6493	free_fair_sched_group(tg);
6494	free_rt_sched_group(tg);
6495	autogroup_free(tg);
6496	kfree(tg);
6497}
6498
6499/* allocate runqueue etc for a new task group */
6500struct task_group *sched_create_group(struct task_group *parent)
6501{
6502	struct task_group *tg;
6503
6504	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
6505	if (!tg)
6506		return ERR_PTR(-ENOMEM);
6507
6508	if (!alloc_fair_sched_group(tg, parent))
6509		goto err;
6510
6511	if (!alloc_rt_sched_group(tg, parent))
6512		goto err;
6513
6514	return tg;
6515
6516err:
6517	free_sched_group(tg);
6518	return ERR_PTR(-ENOMEM);
6519}
6520
6521void sched_online_group(struct task_group *tg, struct task_group *parent)
6522{
6523	unsigned long flags;
6524
6525	spin_lock_irqsave(&task_group_lock, flags);
6526	list_add_rcu(&tg->list, &task_groups);
6527
6528	WARN_ON(!parent); /* root should already exist */
6529
6530	tg->parent = parent;
6531	INIT_LIST_HEAD(&tg->children);
6532	list_add_rcu(&tg->siblings, &parent->children);
6533	spin_unlock_irqrestore(&task_group_lock, flags);
6534}
6535
6536/* rcu callback to free various structures associated with a task group */
6537static void free_sched_group_rcu(struct rcu_head *rhp)
6538{
6539	/* now it should be safe to free those cfs_rqs */
6540	free_sched_group(container_of(rhp, struct task_group, rcu));
6541}
6542
6543/* Destroy runqueue etc associated with a task group */
6544void sched_destroy_group(struct task_group *tg)
6545{
6546	/* wait for possible concurrent references to cfs_rqs to complete */
6547	call_rcu(&tg->rcu, free_sched_group_rcu);
6548}
6549
6550void sched_offline_group(struct task_group *tg)
6551{
6552	unsigned long flags;
6553	int i;
6554
6555	/* end participation in shares distribution */
6556	for_each_possible_cpu(i)
6557		unregister_fair_sched_group(tg, i);
6558
6559	spin_lock_irqsave(&task_group_lock, flags);
6560	list_del_rcu(&tg->list);
6561	list_del_rcu(&tg->siblings);
6562	spin_unlock_irqrestore(&task_group_lock, flags);
6563}
6564
6565/*
6566 * Change a task's runqueue when it moves between groups. The caller should
6567 * have put the task in its new group by now; this function just updates
6568 * tsk->se.cfs_rq and tsk->se.parent to reflect the new group.
6569 */
6570void sched_move_task(struct task_struct *tsk)
6571{
6572	struct task_group *tg;
6573	int on_rq, running;
6574	unsigned long flags;
6575	struct rq *rq;
6576
6577	rq = task_rq_lock(tsk, &flags);
6578
6579	running = task_current(rq, tsk);
6580	on_rq = tsk->on_rq;
6581
6582	if (on_rq)
6583		dequeue_task(rq, tsk, 0);
6584	if (unlikely(running))
6585		tsk->sched_class->put_prev_task(rq, tsk);
6586
6587	tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id,
6588				lockdep_is_held(&tsk->sighand->siglock)),
6589			  struct task_group, css);
6590	tg = autogroup_task_group(tsk, tg);
6591	tsk->sched_task_group = tg;
6592
6593#ifdef CONFIG_FAIR_GROUP_SCHED
6594	if (tsk->sched_class->task_move_group)
6595		tsk->sched_class->task_move_group(tsk, on_rq);
6596	else
6597#endif
6598		set_task_rq(tsk, task_cpu(tsk));
6599
6600	if (unlikely(running))
6601		tsk->sched_class->set_curr_task(rq);
6602	if (on_rq)
6603		enqueue_task(rq, tsk, 0);
6604
6605	task_rq_unlock(rq, tsk, &flags);
6606}
6607#endif /* CONFIG_CGROUP_SCHED */
6608
6609#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
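/*
 * to_ratio() expresses runtime/period as a 20-bit fixed-point fraction, so
 * 1.0 corresponds to 1 << 20 (which is what RUNTIME_INF maps to).  E.g. a
 * runtime of 950000us in a 1000000us period gives ~0.95 * 2^20 = 996147.
 */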
6610static unsigned long to_ratio(u64 period, u64 runtime)
6611{
6612	if (runtime == RUNTIME_INF)
6613		return 1ULL << 20;
6614
6615	return div64_u64(runtime << 20, period);
6616}
6617#endif
6618
6619#ifdef CONFIG_RT_GROUP_SCHED
6620/*
6621 * Ensure that the real time constraints are schedulable.
6622 */
6623static DEFINE_MUTEX(rt_constraints_mutex);
6624
6625/* Must be called with tasklist_lock held */
6626static inline int tg_has_rt_tasks(struct task_group *tg)
6627{
6628	struct task_struct *g, *p;
6629
6630	do_each_thread(g, p) {
6631		if (rt_task(p) && task_rq(p)->rt.tg == tg)
6632			return 1;
6633	} while_each_thread(g, p);
6634
6635	return 0;
6636}
6637
6638struct rt_schedulable_data {
6639	struct task_group *tg;
6640	u64 rt_period;
6641	u64 rt_runtime;
6642};
6643
6644static int tg_rt_schedulable(struct task_group *tg, void *data)
6645{
6646	struct rt_schedulable_data *d = data;
6647	struct task_group *child;
6648	unsigned long total, sum = 0;
6649	u64 period, runtime;
6650
6651	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
6652	runtime = tg->rt_bandwidth.rt_runtime;
6653
6654	if (tg == d->tg) {
6655		period = d->rt_period;
6656		runtime = d->rt_runtime;
6657	}
6658
6659	/*
6660	 * Cannot have more runtime than the period.
6661	 */
6662	if (runtime > period && runtime != RUNTIME_INF)
6663		return -EINVAL;
6664
6665	/*
6666	 * Ensure we don't starve existing RT tasks.
6667	 */
6668	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
6669		return -EBUSY;
6670
6671	total = to_ratio(period, runtime);
6672
6673	/*
6674	 * Nobody can have more than the global setting allows.
6675	 */
6676	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
6677		return -EINVAL;
6678
6679	/*
6680	 * The sum of our children's runtime should not exceed our own.
6681	 */
6682	list_for_each_entry_rcu(child, &tg->children, siblings) {
6683		period = ktime_to_ns(child->rt_bandwidth.rt_period);
6684		runtime = child->rt_bandwidth.rt_runtime;
6685
6686		if (child == d->tg) {
6687			period = d->rt_period;
6688			runtime = d->rt_runtime;
6689		}
6690
6691		sum += to_ratio(period, runtime);
6692	}
6693
6694	if (sum > total)
6695		return -EINVAL;
6696
6697	return 0;
6698}
6699
6700static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
6701{
6702	int ret;
6703
6704	struct rt_schedulable_data data = {
6705		.tg = tg,
6706		.rt_period = period,
6707		.rt_runtime = runtime,
6708	};
6709
6710	rcu_read_lock();
6711	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
6712	rcu_read_unlock();
6713
6714	return ret;
6715}
6716
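/*
 * Apply a new RT period/runtime pair to a task group: validate the whole
 * hierarchy under rt_constraints_mutex and tasklist_lock, then push the new
 * runtime down to every per-CPU rt_rq under the bandwidth/runtime locks.
 */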
6717static int tg_set_rt_bandwidth(struct task_group *tg,
6718		u64 rt_period, u64 rt_runtime)
6719{
6720	int i, err = 0;
6721
6722	mutex_lock(&rt_constraints_mutex);
6723	read_lock(&tasklist_lock);
6724	err = __rt_schedulable(tg, rt_period, rt_runtime);
6725	if (err)
6726		goto unlock;
6727
6728	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
6729	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
6730	tg->rt_bandwidth.rt_runtime = rt_runtime;
6731
6732	for_each_possible_cpu(i) {
6733		struct rt_rq *rt_rq = tg->rt_rq[i];
6734
6735		raw_spin_lock(&rt_rq->rt_runtime_lock);
6736		rt_rq->rt_runtime = rt_runtime;
6737		raw_spin_unlock(&rt_rq->rt_runtime_lock);
6738	}
6739	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
6740unlock:
6741	read_unlock(&tasklist_lock);
6742	mutex_unlock(&rt_constraints_mutex);
6743
6744	return err;
6745}
6746
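/*
 * cgroup-facing RT helpers: values are exchanged with userspace in
 * microseconds, stored internally in nanoseconds, and -1 denotes
 * RUNTIME_INF (no limit).
 */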
6747static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
6748{
6749	u64 rt_runtime, rt_period;
6750
6751	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
6752	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
6753	if (rt_runtime_us < 0)
6754		rt_runtime = RUNTIME_INF;
6755
6756	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
6757}
6758
6759static long sched_group_rt_runtime(struct task_group *tg)
6760{
6761	u64 rt_runtime_us;
6762
6763	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
6764		return -1;
6765
6766	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
6767	do_div(rt_runtime_us, NSEC_PER_USEC);
6768	return rt_runtime_us;
6769}
6770
6771static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
6772{
6773	u64 rt_runtime, rt_period;
6774
6775	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
6776	rt_runtime = tg->rt_bandwidth.rt_runtime;
6777
6778	if (rt_period == 0)
6779		return -EINVAL;
6780
6781	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
6782}
6783
6784static long sched_group_rt_period(struct task_group *tg)
6785{
6786	u64 rt_period_us;
6787
6788	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
6789	do_div(rt_period_us, NSEC_PER_USEC);
6790	return rt_period_us;
6791}
6792
6793static int sched_rt_global_constraints(void)
6794{
6795	u64 runtime, period;
6796	int ret = 0;
6797
6798	if (sysctl_sched_rt_period <= 0)
6799		return -EINVAL;
6800
6801	runtime = global_rt_runtime();
6802	period = global_rt_period();
6803
6804	/*
6805	 * Sanity check on the sysctl variables.
6806	 */
6807	if (runtime > period && runtime != RUNTIME_INF)
6808		return -EINVAL;
6809
6810	mutex_lock(&rt_constraints_mutex);
6811	read_lock(&tasklist_lock);
6812	ret = __rt_schedulable(NULL, 0, 0);
6813	read_unlock(&tasklist_lock);
6814	mutex_unlock(&rt_constraints_mutex);
6815
6816	return ret;
6817}
6818
6819static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
6820{
6821	/* Don't accept realtime tasks when there is no way for them to run */
6822	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
6823		return 0;
6824
6825	return 1;
6826}
6827
6828#else /* !CONFIG_RT_GROUP_SCHED */
6829static int sched_rt_global_constraints(void)
6830{
6831	unsigned long flags;
6832	int i;
6833
6834	if (sysctl_sched_rt_period <= 0)
6835		return -EINVAL;
6836
6837	/*
6838	 * There are always some RT tasks in the root group
6839	 * -- migration, kstopmachine etc.
6840	 */
6841	if (sysctl_sched_rt_runtime == 0)
6842		return -EBUSY;
6843
6844	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
6845	for_each_possible_cpu(i) {
6846		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
6847
6848		raw_spin_lock(&rt_rq->rt_runtime_lock);
6849		rt_rq->rt_runtime = global_rt_runtime();
6850		raw_spin_unlock(&rt_rq->rt_runtime_lock);
6851	}
6852	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
6853
6854	return 0;
6855}
6856#endif /* CONFIG_RT_GROUP_SCHED */
6857
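/*
 * proc handler for the RR timeslice sysctl: the value is written in
 * milliseconds but kept internally in jiffies, and writing zero (or a
 * negative value) restores the default RR_TIMESLICE.
 */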
6858int sched_rr_handler(struct ctl_table *table, int write,
6859		void __user *buffer, size_t *lenp,
6860		loff_t *ppos)
6861{
6862	int ret;
6863	static DEFINE_MUTEX(mutex);
6864
6865	mutex_lock(&mutex);
6866	ret = proc_dointvec(table, write, buffer, lenp, ppos);
6867	/* make sure that internally we keep jiffies */
6868	/* also, writing zero resets timeslice to default */
6869	if (!ret && write) {
6870		sched_rr_timeslice = sched_rr_timeslice <= 0 ?
6871			RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
6872	}
6873	mutex_unlock(&mutex);
6874	return ret;
6875}
6876
6877int sched_rt_handler(struct ctl_table *table, int write,
6878		void __user *buffer, size_t *lenp,
6879		loff_t *ppos)
6880{
6881	int ret;
6882	int old_period, old_runtime;
6883	static DEFINE_MUTEX(mutex);
6884
6885	mutex_lock(&mutex);
6886	old_period = sysctl_sched_rt_period;
6887	old_runtime = sysctl_sched_rt_runtime;
6888
6889	ret = proc_dointvec(table, write, buffer, lenp, ppos);
6890
6891	if (!ret && write) {
6892		ret = sched_rt_global_constraints();
6893		if (ret) {
6894			sysctl_sched_rt_period = old_period;
6895			sysctl_sched_rt_runtime = old_runtime;
6896		} else {
6897			def_rt_bandwidth.rt_runtime = global_rt_runtime();
6898			def_rt_bandwidth.rt_period =
6899				ns_to_ktime(global_rt_period());
6900		}
6901	}
6902	mutex_unlock(&mutex);
6903
6904	return ret;
6905}
6906
6907#ifdef CONFIG_CGROUP_SCHED
6908
6909static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
6910{
6911	return css ? container_of(css, struct task_group, css) : NULL;
6912}
6913
6914static struct cgroup_subsys_state *
6915cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
6916{
6917	struct task_group *parent = css_tg(parent_css);
6918	struct task_group *tg;
6919
6920	if (!parent) {
6921		/* This is early initialization for the top cgroup */
6922		return &root_task_group.css;
6923	}
6924
6925	tg = sched_create_group(parent);
6926	if (IS_ERR(tg))
6927		return ERR_PTR(-ENOMEM);
6928
6929	return &tg->css;
6930}
6931
6932static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
6933{
6934	struct task_group *tg = css_tg(css);
6935	struct task_group *parent = css_tg(css_parent(css));
6936
6937	if (parent)
6938		sched_online_group(tg, parent);
6939	return 0;
6940}
6941
6942static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
6943{
6944	struct task_group *tg = css_tg(css);
6945
6946	sched_destroy_group(tg);
6947}
6948
6949static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
6950{
6951	struct task_group *tg = css_tg(css);
6952
6953	sched_offline_group(tg);
6954}
6955
6956static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
6957				 struct cgroup_taskset *tset)
6958{
6959	struct task_struct *task;
6960
6961	cgroup_taskset_for_each(task, css, tset) {
6962#ifdef CONFIG_RT_GROUP_SCHED
6963		if (!sched_rt_can_attach(css_tg(css), task))
6964			return -EINVAL;
6965#else
6966		/* We don't support RT-tasks being in separate groups */
6967		if (task->sched_class != &fair_sched_class)
6968			return -EINVAL;
6969#endif
6970	}
6971	return 0;
6972}
6973
6974static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
6975			      struct cgroup_taskset *tset)
6976{
6977	struct task_struct *task;
6978
6979	cgroup_taskset_for_each(task, css, tset)
6980		sched_move_task(task);
6981}
6982
6983static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
6984			    struct cgroup_subsys_state *old_css,
6985			    struct task_struct *task)
6986{
6987	/*
6988	 * cgroup_exit() is called in the copy_process() failure path.
6989	 * Ignore this case since the task hasn't run yet; this avoids
6990	 * trying to poke a half-freed task state from generic code.
6991	 */
6992	if (!(task->flags & PF_EXITING))
6993		return;
6994
6995	sched_move_task(task);
6996}
6997
6998#ifdef CONFIG_FAIR_GROUP_SCHED
6999static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
7000				struct cftype *cftype, u64 shareval)
7001{
7002	return sched_group_set_shares(css_tg(css), scale_load(shareval));
7003}
7004
7005static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
7006			       struct cftype *cft)
7007{
7008	struct task_group *tg = css_tg(css);
7009
7010	return (u64) scale_load_down(tg->shares);
7011}
7012
7013#ifdef CONFIG_CFS_BANDWIDTH
7014static DEFINE_MUTEX(cfs_constraints_mutex);
7015
7016const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
7017const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
7018
7019static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
7020
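/*
 * Core CFS bandwidth update: sanity-check and validate the new period/quota
 * against the group hierarchy, update the group's cfs_bandwidth under its
 * lock, then refresh (and if needed unthrottle) every per-CPU cfs_rq.
 */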
7021static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
7022{
7023	int i, ret = 0, runtime_enabled, runtime_was_enabled;
7024	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
7025
7026	if (tg == &root_task_group)
7027		return -EINVAL;
7028
7029	/*
7030	 * Ensure we have at least some amount of bandwidth every period.  This is
7031	 * to prevent reaching a state of large arrears when throttled via
7032	 * entity_tick() resulting in prolonged exit starvation.
7033	 */
7034	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
7035		return -EINVAL;
7036
7037	/*
7038	 * Likewise, bound things on the other side by preventing insane quota
7039	 * periods.  This also allows us to normalize in computing quota
7040	 * feasibility.
7041	 */
7042	if (period > max_cfs_quota_period)
7043		return -EINVAL;
7044
7045	mutex_lock(&cfs_constraints_mutex);
7046	ret = __cfs_schedulable(tg, period, quota);
7047	if (ret)
7048		goto out_unlock;
7049
7050	runtime_enabled = quota != RUNTIME_INF;
7051	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
7052	/*
7053	 * If we need to toggle cfs_bandwidth_used, off->on must occur
7054	 * before making related changes, and on->off must occur afterwards
7055	 */
7056	if (runtime_enabled && !runtime_was_enabled)
7057		cfs_bandwidth_usage_inc();
7058	raw_spin_lock_irq(&cfs_b->lock);
7059	cfs_b->period = ns_to_ktime(period);
7060	cfs_b->quota = quota;
7061
7062	__refill_cfs_bandwidth_runtime(cfs_b);
7063	/* restart the period timer (if active) to handle new period expiry */
7064	if (runtime_enabled && cfs_b->timer_active) {
7065		/* force a reprogram */
7066		cfs_b->timer_active = 0;
7067		__start_cfs_bandwidth(cfs_b);
7068	}
7069	raw_spin_unlock_irq(&cfs_b->lock);
7070
7071	for_each_possible_cpu(i) {
7072		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
7073		struct rq *rq = cfs_rq->rq;
7074
7075		raw_spin_lock_irq(&rq->lock);
7076		cfs_rq->runtime_enabled = runtime_enabled;
7077		cfs_rq->runtime_remaining = 0;
7078
7079		if (cfs_rq->throttled)
7080			unthrottle_cfs_rq(cfs_rq);
7081		raw_spin_unlock_irq(&rq->lock);
7082	}
7083	if (runtime_was_enabled && !runtime_enabled)
7084		cfs_bandwidth_usage_dec();
7085out_unlock:
7086	mutex_unlock(&cfs_constraints_mutex);
7087
7088	return ret;
7089}
7090
7091int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
7092{
7093	u64 quota, period;
7094
7095	period = ktime_to_ns(tg->cfs_bandwidth.period);
7096	if (cfs_quota_us < 0)
7097		quota = RUNTIME_INF;
7098	else
7099		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
7100
7101	return tg_set_cfs_bandwidth(tg, period, quota);
7102}
7103
7104long tg_get_cfs_quota(struct task_group *tg)
7105{
7106	u64 quota_us;
7107
7108	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
7109		return -1;
7110
7111	quota_us = tg->cfs_bandwidth.quota;
7112	do_div(quota_us, NSEC_PER_USEC);
7113
7114	return quota_us;
7115}
7116
7117int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
7118{
7119	u64 quota, period;
7120
7121	period = (u64)cfs_period_us * NSEC_PER_USEC;
7122	quota = tg->cfs_bandwidth.quota;
7123
7124	return tg_set_cfs_bandwidth(tg, period, quota);
7125}
7126
7127long tg_get_cfs_period(struct task_group *tg)
7128{
7129	u64 cfs_period_us;
7130
7131	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
7132	do_div(cfs_period_us, NSEC_PER_USEC);
7133
7134	return cfs_period_us;
7135}
7136
7137static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
7138				  struct cftype *cft)
7139{
7140	return tg_get_cfs_quota(css_tg(css));
7141}
7142
7143static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
7144				   struct cftype *cftype, s64 cfs_quota_us)
7145{
7146	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
7147}
7148
7149static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
7150				   struct cftype *cft)
7151{
7152	return tg_get_cfs_period(css_tg(css));
7153}
7154
7155static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
7156				    struct cftype *cftype, u64 cfs_period_us)
7157{
7158	return tg_set_cfs_period(css_tg(css), cfs_period_us);
7159}
7160
7161struct cfs_schedulable_data {
7162	struct task_group *tg;
7163	u64 period, quota;
7164};
7165
7166/*
7167 * normalize group quota/period to be quota/max_period
7168 * note: units are usecs
7169 */
7170static u64 normalize_cfs_quota(struct task_group *tg,
7171			       struct cfs_schedulable_data *d)
7172{
7173	u64 quota, period;
7174
7175	if (tg == d->tg) {
7176		period = d->period;
7177		quota = d->quota;
7178	} else {
7179		period = tg_get_cfs_period(tg);
7180		quota = tg_get_cfs_quota(tg);
7181	}
7182
7183	/* note: these should typically be equivalent */
7184	if (quota == RUNTIME_INF || quota == -1)
7185		return RUNTIME_INF;
7186
7187	return to_ratio(period, quota);
7188}
7189
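/*
 * Walked top-down over the task-group tree: a group's normalized quota must
 * not exceed the quota recorded for its parent, and a group with no limit
 * simply inherits the parent's value.
 */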
7190static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
7191{
7192	struct cfs_schedulable_data *d = data;
7193	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
7194	s64 quota = 0, parent_quota = -1;
7195
7196	if (!tg->parent) {
7197		quota = RUNTIME_INF;
7198	} else {
7199		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
7200
7201		quota = normalize_cfs_quota(tg, d);
7202		parent_quota = parent_b->hierarchal_quota;
7203
7204		/*
7205		 * ensure max(child_quota) <= parent_quota, inherit when no
7206		 * limit is set
7207		 */
7208		if (quota == RUNTIME_INF)
7209			quota = parent_quota;
7210		else if (parent_quota != RUNTIME_INF && quota > parent_quota)
7211			return -EINVAL;
7212	}
7213	cfs_b->hierarchal_quota = quota;
7214
7215	return 0;
7216}
7217
7218static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
7219{
7220	int ret;
7221	struct cfs_schedulable_data data = {
7222		.tg = tg,
7223		.period = period,
7224		.quota = quota,
7225	};
7226
7227	if (quota != RUNTIME_INF) {
7228		do_div(data.period, NSEC_PER_USEC);
7229		do_div(data.quota, NSEC_PER_USEC);
7230	}
7231
7232	rcu_read_lock();
7233	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
7234	rcu_read_unlock();
7235
7236	return ret;
7237}
7238
7239static int cpu_stats_show(struct cgroup_subsys_state *css, struct cftype *cft,
7240		struct cgroup_map_cb *cb)
7241{
7242	struct task_group *tg = css_tg(css);
7243	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
7244
7245	cb->fill(cb, "nr_periods", cfs_b->nr_periods);
7246	cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
7247	cb->fill(cb, "throttled_time", cfs_b->throttled_time);
7248
7249	return 0;
7250}
7251#endif /* CONFIG_CFS_BANDWIDTH */
7252#endif /* CONFIG_FAIR_GROUP_SCHED */
7253
7254#ifdef CONFIG_RT_GROUP_SCHED
7255static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
7256				struct cftype *cft, s64 val)
7257{
7258	return sched_group_set_rt_runtime(css_tg(css), val);
7259}
7260
7261static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
7262			       struct cftype *cft)
7263{
7264	return sched_group_rt_runtime(css_tg(css));
7265}
7266
7267static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
7268				    struct cftype *cftype, u64 rt_period_us)
7269{
7270	return sched_group_set_rt_period(css_tg(css), rt_period_us);
7271}
7272
7273static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
7274				   struct cftype *cft)
7275{
7276	return sched_group_rt_period(css_tg(css));
7277}
7278#endif /* CONFIG_RT_GROUP_SCHED */
7279
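/*
 * Control files exported by the "cpu" cgroup controller; each entry becomes
 * a cpu.<name> file (cpu.shares, cpu.cfs_quota_us, cpu.stat, ...) wired to
 * the read/write helpers above.
 */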
7280static struct cftype cpu_files[] = {
7281#ifdef CONFIG_FAIR_GROUP_SCHED
7282	{
7283		.name = "shares",
7284		.read_u64 = cpu_shares_read_u64,
7285		.write_u64 = cpu_shares_write_u64,
7286	},
7287#endif
7288#ifdef CONFIG_CFS_BANDWIDTH
7289	{
7290		.name = "cfs_quota_us",
7291		.read_s64 = cpu_cfs_quota_read_s64,
7292		.write_s64 = cpu_cfs_quota_write_s64,
7293	},
7294	{
7295		.name = "cfs_period_us",
7296		.read_u64 = cpu_cfs_period_read_u64,
7297		.write_u64 = cpu_cfs_period_write_u64,
7298	},
7299	{
7300		.name = "stat",
7301		.read_map = cpu_stats_show,
7302	},
7303#endif
7304#ifdef CONFIG_RT_GROUP_SCHED
7305	{
7306		.name = "rt_runtime_us",
7307		.read_s64 = cpu_rt_runtime_read,
7308		.write_s64 = cpu_rt_runtime_write,
7309	},
7310	{
7311		.name = "rt_period_us",
7312		.read_u64 = cpu_rt_period_read_uint,
7313		.write_u64 = cpu_rt_period_write_uint,
7314	},
7315#endif
7316	{ }	/* terminate */
7317};
7318
7319struct cgroup_subsys cpu_cgroup_subsys = {
7320	.name		= "cpu",
7321	.css_alloc	= cpu_cgroup_css_alloc,
7322	.css_free	= cpu_cgroup_css_free,
7323	.css_online	= cpu_cgroup_css_online,
7324	.css_offline	= cpu_cgroup_css_offline,
7325	.can_attach	= cpu_cgroup_can_attach,
7326	.attach		= cpu_cgroup_attach,
7327	.exit		= cpu_cgroup_exit,
7328	.subsys_id	= cpu_cgroup_subsys_id,
7329	.base_cftypes	= cpu_files,
7330	.early_init	= 1,
7331};
7332
7333#endif	/* CONFIG_CGROUP_SCHED */
7334
7335void dump_cpu_task(int cpu)
7336{
7337	pr_info("Task dump for CPU %d:\n", cpu);
7338	sched_show_task(cpu_curr(cpu));
7339}
7340