core.c revision 5cd08fbfdb6baa9fe98f530b76898fc5725a6289
1/* 2 * kernel/sched/core.c 3 * 4 * Kernel scheduler and related syscalls 5 * 6 * Copyright (C) 1991-2002 Linus Torvalds 7 * 8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and 9 * make semaphores SMP safe 10 * 1998-11-19 Implemented schedule_timeout() and related stuff 11 * by Andrea Arcangeli 12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: 13 * hybrid priority-list and round-robin design with 14 * an array-switch method of distributing timeslices 15 * and per-CPU runqueues. Cleanups and useful suggestions 16 * by Davide Libenzi, preemptible kernel bits by Robert Love. 17 * 2003-09-03 Interactivity tuning by Con Kolivas. 18 * 2004-04-02 Scheduler domains code by Nick Piggin 19 * 2007-04-15 Work begun on replacing all interactivity tuning with a 20 * fair scheduling design by Con Kolivas. 21 * 2007-05-05 Load balancing (smp-nice) and other improvements 22 * by Peter Williams 23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith 24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri 25 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins, 26 * Thomas Gleixner, Mike Kravetz 27 */ 28 29#include <linux/mm.h> 30#include <linux/module.h> 31#include <linux/nmi.h> 32#include <linux/init.h> 33#include <linux/uaccess.h> 34#include <linux/highmem.h> 35#include <asm/mmu_context.h> 36#include <linux/interrupt.h> 37#include <linux/capability.h> 38#include <linux/completion.h> 39#include <linux/kernel_stat.h> 40#include <linux/debug_locks.h> 41#include <linux/perf_event.h> 42#include <linux/security.h> 43#include <linux/notifier.h> 44#include <linux/profile.h> 45#include <linux/freezer.h> 46#include <linux/vmalloc.h> 47#include <linux/blkdev.h> 48#include <linux/delay.h> 49#include <linux/pid_namespace.h> 50#include <linux/smp.h> 51#include <linux/threads.h> 52#include <linux/timer.h> 53#include <linux/rcupdate.h> 54#include <linux/cpu.h> 55#include <linux/cpuset.h> 56#include <linux/percpu.h> 57#include <linux/proc_fs.h> 58#include <linux/seq_file.h> 59#include <linux/sysctl.h> 60#include <linux/syscalls.h> 61#include <linux/times.h> 62#include <linux/tsacct_kern.h> 63#include <linux/kprobes.h> 64#include <linux/delayacct.h> 65#include <linux/unistd.h> 66#include <linux/pagemap.h> 67#include <linux/hrtimer.h> 68#include <linux/tick.h> 69#include <linux/debugfs.h> 70#include <linux/ctype.h> 71#include <linux/ftrace.h> 72#include <linux/slab.h> 73#include <linux/init_task.h> 74#include <linux/binfmts.h> 75#include <linux/context_tracking.h> 76#include <linux/compiler.h> 77 78#include <asm/switch_to.h> 79#include <asm/tlb.h> 80#include <asm/irq_regs.h> 81#include <asm/mutex.h> 82#ifdef CONFIG_PARAVIRT 83#include <asm/paravirt.h> 84#endif 85 86#include "sched.h" 87#include "../workqueue_internal.h" 88#include "../smpboot.h" 89 90#define CREATE_TRACE_POINTS 91#include <trace/events/sched.h> 92 93#ifdef smp_mb__before_atomic 94void __smp_mb__before_atomic(void) 95{ 96 smp_mb__before_atomic(); 97} 98EXPORT_SYMBOL(__smp_mb__before_atomic); 99#endif 100 101#ifdef smp_mb__after_atomic 102void __smp_mb__after_atomic(void) 103{ 104 smp_mb__after_atomic(); 105} 106EXPORT_SYMBOL(__smp_mb__after_atomic); 107#endif 108 109void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period) 110{ 111 unsigned long delta; 112 ktime_t soft, hard, now; 113 114 for (;;) { 115 if (hrtimer_active(period_timer)) 116 break; 117 118 now = hrtimer_cb_get_time(period_timer); 119 hrtimer_forward(period_timer, now, period); 120 121 soft = hrtimer_get_softexpires(period_timer); 122 hard = hrtimer_get_expires(period_timer); 123 delta = ktime_to_ns(ktime_sub(hard, soft)); 124 __hrtimer_start_range_ns(period_timer, soft, delta, 125 HRTIMER_MODE_ABS_PINNED, 0); 126 } 127} 128 129DEFINE_MUTEX(sched_domains_mutex); 130DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 131 132static void update_rq_clock_task(struct rq *rq, s64 delta); 133 134void update_rq_clock(struct rq *rq) 135{ 136 s64 delta; 137 138 if (rq->skip_clock_update > 0) 139 return; 140 141 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; 142 if (delta < 0) 143 return; 144 rq->clock += delta; 145 update_rq_clock_task(rq, delta); 146} 147 148/* 149 * Debugging: various feature bits 150 */ 151 152#define SCHED_FEAT(name, enabled) \ 153 (1UL << __SCHED_FEAT_##name) * enabled | 154 155const_debug unsigned int sysctl_sched_features = 156#include "features.h" 157 0; 158 159#undef SCHED_FEAT 160 161#ifdef CONFIG_SCHED_DEBUG 162#define SCHED_FEAT(name, enabled) \ 163 #name , 164 165static const char * const sched_feat_names[] = { 166#include "features.h" 167}; 168 169#undef SCHED_FEAT 170 171static int sched_feat_show(struct seq_file *m, void *v) 172{ 173 int i; 174 175 for (i = 0; i < __SCHED_FEAT_NR; i++) { 176 if (!(sysctl_sched_features & (1UL << i))) 177 seq_puts(m, "NO_"); 178 seq_printf(m, "%s ", sched_feat_names[i]); 179 } 180 seq_puts(m, "\n"); 181 182 return 0; 183} 184 185#ifdef HAVE_JUMP_LABEL 186 187#define jump_label_key__true STATIC_KEY_INIT_TRUE 188#define jump_label_key__false STATIC_KEY_INIT_FALSE 189 190#define SCHED_FEAT(name, enabled) \ 191 jump_label_key__##enabled , 192 193struct static_key sched_feat_keys[__SCHED_FEAT_NR] = { 194#include "features.h" 195}; 196 197#undef SCHED_FEAT 198 199static void sched_feat_disable(int i) 200{ 201 if (static_key_enabled(&sched_feat_keys[i])) 202 static_key_slow_dec(&sched_feat_keys[i]); 203} 204 205static void sched_feat_enable(int i) 206{ 207 if (!static_key_enabled(&sched_feat_keys[i])) 208 static_key_slow_inc(&sched_feat_keys[i]); 209} 210#else 211static void sched_feat_disable(int i) { }; 212static void sched_feat_enable(int i) { }; 213#endif /* HAVE_JUMP_LABEL */ 214 215static int sched_feat_set(char *cmp) 216{ 217 int i; 218 int neg = 0; 219 220 if (strncmp(cmp, "NO_", 3) == 0) { 221 neg = 1; 222 cmp += 3; 223 } 224 225 for (i = 0; i < __SCHED_FEAT_NR; i++) { 226 if (strcmp(cmp, sched_feat_names[i]) == 0) { 227 if (neg) { 228 sysctl_sched_features &= ~(1UL << i); 229 sched_feat_disable(i); 230 } else { 231 sysctl_sched_features |= (1UL << i); 232 sched_feat_enable(i); 233 } 234 break; 235 } 236 } 237 238 return i; 239} 240 241static ssize_t 242sched_feat_write(struct file *filp, const char __user *ubuf, 243 size_t cnt, loff_t *ppos) 244{ 245 char buf[64]; 246 char *cmp; 247 int i; 248 struct inode *inode; 249 250 if (cnt > 63) 251 cnt = 63; 252 253 if (copy_from_user(&buf, ubuf, cnt)) 254 return -EFAULT; 255 256 buf[cnt] = 0; 257 cmp = strstrip(buf); 258 259 /* Ensure the static_key remains in a consistent state */ 260 inode = file_inode(filp); 261 mutex_lock(&inode->i_mutex); 262 i = sched_feat_set(cmp); 263 mutex_unlock(&inode->i_mutex); 264 if (i == __SCHED_FEAT_NR) 265 return -EINVAL; 266 267 *ppos += cnt; 268 269 return cnt; 270} 271 272static int sched_feat_open(struct inode *inode, struct file *filp) 273{ 274 return single_open(filp, sched_feat_show, NULL); 275} 276 277static const struct file_operations sched_feat_fops = { 278 .open = sched_feat_open, 279 .write = sched_feat_write, 280 .read = seq_read, 281 .llseek = seq_lseek, 282 .release = single_release, 283}; 284 285static __init int sched_init_debug(void) 286{ 287 debugfs_create_file("sched_features", 0644, NULL, NULL, 288 &sched_feat_fops); 289 290 return 0; 291} 292late_initcall(sched_init_debug); 293#endif /* CONFIG_SCHED_DEBUG */ 294 295/* 296 * Number of tasks to iterate in a single balance run. 297 * Limited because this is done with IRQs disabled. 298 */ 299const_debug unsigned int sysctl_sched_nr_migrate = 32; 300 301/* 302 * period over which we average the RT time consumption, measured 303 * in ms. 304 * 305 * default: 1s 306 */ 307const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC; 308 309/* 310 * period over which we measure -rt task cpu usage in us. 311 * default: 1s 312 */ 313unsigned int sysctl_sched_rt_period = 1000000; 314 315__read_mostly int scheduler_running; 316 317/* 318 * part of the period that we allow rt tasks to run in us. 319 * default: 0.95s 320 */ 321int sysctl_sched_rt_runtime = 950000; 322 323/* 324 * __task_rq_lock - lock the rq @p resides on. 325 */ 326static inline struct rq *__task_rq_lock(struct task_struct *p) 327 __acquires(rq->lock) 328{ 329 struct rq *rq; 330 331 lockdep_assert_held(&p->pi_lock); 332 333 for (;;) { 334 rq = task_rq(p); 335 raw_spin_lock(&rq->lock); 336 if (likely(rq == task_rq(p))) 337 return rq; 338 raw_spin_unlock(&rq->lock); 339 } 340} 341 342/* 343 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on. 344 */ 345static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) 346 __acquires(p->pi_lock) 347 __acquires(rq->lock) 348{ 349 struct rq *rq; 350 351 for (;;) { 352 raw_spin_lock_irqsave(&p->pi_lock, *flags); 353 rq = task_rq(p); 354 raw_spin_lock(&rq->lock); 355 if (likely(rq == task_rq(p))) 356 return rq; 357 raw_spin_unlock(&rq->lock); 358 raw_spin_unlock_irqrestore(&p->pi_lock, *flags); 359 } 360} 361 362static void __task_rq_unlock(struct rq *rq) 363 __releases(rq->lock) 364{ 365 raw_spin_unlock(&rq->lock); 366} 367 368static inline void 369task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags) 370 __releases(rq->lock) 371 __releases(p->pi_lock) 372{ 373 raw_spin_unlock(&rq->lock); 374 raw_spin_unlock_irqrestore(&p->pi_lock, *flags); 375} 376 377/* 378 * this_rq_lock - lock this runqueue and disable interrupts. 379 */ 380static struct rq *this_rq_lock(void) 381 __acquires(rq->lock) 382{ 383 struct rq *rq; 384 385 local_irq_disable(); 386 rq = this_rq(); 387 raw_spin_lock(&rq->lock); 388 389 return rq; 390} 391 392#ifdef CONFIG_SCHED_HRTICK 393/* 394 * Use HR-timers to deliver accurate preemption points. 395 */ 396 397static void hrtick_clear(struct rq *rq) 398{ 399 if (hrtimer_active(&rq->hrtick_timer)) 400 hrtimer_cancel(&rq->hrtick_timer); 401} 402 403/* 404 * High-resolution timer tick. 405 * Runs from hardirq context with interrupts disabled. 406 */ 407static enum hrtimer_restart hrtick(struct hrtimer *timer) 408{ 409 struct rq *rq = container_of(timer, struct rq, hrtick_timer); 410 411 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); 412 413 raw_spin_lock(&rq->lock); 414 update_rq_clock(rq); 415 rq->curr->sched_class->task_tick(rq, rq->curr, 1); 416 raw_spin_unlock(&rq->lock); 417 418 return HRTIMER_NORESTART; 419} 420 421#ifdef CONFIG_SMP 422 423static int __hrtick_restart(struct rq *rq) 424{ 425 struct hrtimer *timer = &rq->hrtick_timer; 426 ktime_t time = hrtimer_get_softexpires(timer); 427 428 return __hrtimer_start_range_ns(timer, time, 0, HRTIMER_MODE_ABS_PINNED, 0); 429} 430 431/* 432 * called from hardirq (IPI) context 433 */ 434static void __hrtick_start(void *arg) 435{ 436 struct rq *rq = arg; 437 438 raw_spin_lock(&rq->lock); 439 __hrtick_restart(rq); 440 rq->hrtick_csd_pending = 0; 441 raw_spin_unlock(&rq->lock); 442} 443 444/* 445 * Called to set the hrtick timer state. 446 * 447 * called with rq->lock held and irqs disabled 448 */ 449void hrtick_start(struct rq *rq, u64 delay) 450{ 451 struct hrtimer *timer = &rq->hrtick_timer; 452 ktime_t time = ktime_add_ns(timer->base->get_time(), delay); 453 454 hrtimer_set_expires(timer, time); 455 456 if (rq == this_rq()) { 457 __hrtick_restart(rq); 458 } else if (!rq->hrtick_csd_pending) { 459 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); 460 rq->hrtick_csd_pending = 1; 461 } 462} 463 464static int 465hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) 466{ 467 int cpu = (int)(long)hcpu; 468 469 switch (action) { 470 case CPU_UP_CANCELED: 471 case CPU_UP_CANCELED_FROZEN: 472 case CPU_DOWN_PREPARE: 473 case CPU_DOWN_PREPARE_FROZEN: 474 case CPU_DEAD: 475 case CPU_DEAD_FROZEN: 476 hrtick_clear(cpu_rq(cpu)); 477 return NOTIFY_OK; 478 } 479 480 return NOTIFY_DONE; 481} 482 483static __init void init_hrtick(void) 484{ 485 hotcpu_notifier(hotplug_hrtick, 0); 486} 487#else 488/* 489 * Called to set the hrtick timer state. 490 * 491 * called with rq->lock held and irqs disabled 492 */ 493void hrtick_start(struct rq *rq, u64 delay) 494{ 495 __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0, 496 HRTIMER_MODE_REL_PINNED, 0); 497} 498 499static inline void init_hrtick(void) 500{ 501} 502#endif /* CONFIG_SMP */ 503 504static void init_rq_hrtick(struct rq *rq) 505{ 506#ifdef CONFIG_SMP 507 rq->hrtick_csd_pending = 0; 508 509 rq->hrtick_csd.flags = 0; 510 rq->hrtick_csd.func = __hrtick_start; 511 rq->hrtick_csd.info = rq; 512#endif 513 514 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 515 rq->hrtick_timer.function = hrtick; 516} 517#else /* CONFIG_SCHED_HRTICK */ 518static inline void hrtick_clear(struct rq *rq) 519{ 520} 521 522static inline void init_rq_hrtick(struct rq *rq) 523{ 524} 525 526static inline void init_hrtick(void) 527{ 528} 529#endif /* CONFIG_SCHED_HRTICK */ 530 531/* 532 * cmpxchg based fetch_or, macro so it works for different integer types 533 */ 534#define fetch_or(ptr, val) \ 535({ typeof(*(ptr)) __old, __val = *(ptr); \ 536 for (;;) { \ 537 __old = cmpxchg((ptr), __val, __val | (val)); \ 538 if (__old == __val) \ 539 break; \ 540 __val = __old; \ 541 } \ 542 __old; \ 543}) 544 545#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) 546/* 547 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, 548 * this avoids any races wrt polling state changes and thereby avoids 549 * spurious IPIs. 550 */ 551static bool set_nr_and_not_polling(struct task_struct *p) 552{ 553 struct thread_info *ti = task_thread_info(p); 554 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG); 555} 556 557/* 558 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set. 559 * 560 * If this returns true, then the idle task promises to call 561 * sched_ttwu_pending() and reschedule soon. 562 */ 563static bool set_nr_if_polling(struct task_struct *p) 564{ 565 struct thread_info *ti = task_thread_info(p); 566 typeof(ti->flags) old, val = ACCESS_ONCE(ti->flags); 567 568 for (;;) { 569 if (!(val & _TIF_POLLING_NRFLAG)) 570 return false; 571 if (val & _TIF_NEED_RESCHED) 572 return true; 573 old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED); 574 if (old == val) 575 break; 576 val = old; 577 } 578 return true; 579} 580 581#else 582static bool set_nr_and_not_polling(struct task_struct *p) 583{ 584 set_tsk_need_resched(p); 585 return true; 586} 587 588#ifdef CONFIG_SMP 589static bool set_nr_if_polling(struct task_struct *p) 590{ 591 return false; 592} 593#endif 594#endif 595 596/* 597 * resched_curr - mark rq's current task 'to be rescheduled now'. 598 * 599 * On UP this means the setting of the need_resched flag, on SMP it 600 * might also involve a cross-CPU call to trigger the scheduler on 601 * the target CPU. 602 */ 603void resched_curr(struct rq *rq) 604{ 605 struct task_struct *curr = rq->curr; 606 int cpu; 607 608 lockdep_assert_held(&rq->lock); 609 610 if (test_tsk_need_resched(curr)) 611 return; 612 613 cpu = cpu_of(rq); 614 615 if (cpu == smp_processor_id()) { 616 set_tsk_need_resched(curr); 617 set_preempt_need_resched(); 618 return; 619 } 620 621 if (set_nr_and_not_polling(curr)) 622 smp_send_reschedule(cpu); 623 else 624 trace_sched_wake_idle_without_ipi(cpu); 625} 626 627void resched_cpu(int cpu) 628{ 629 struct rq *rq = cpu_rq(cpu); 630 unsigned long flags; 631 632 if (!raw_spin_trylock_irqsave(&rq->lock, flags)) 633 return; 634 resched_curr(rq); 635 raw_spin_unlock_irqrestore(&rq->lock, flags); 636} 637 638#ifdef CONFIG_SMP 639#ifdef CONFIG_NO_HZ_COMMON 640/* 641 * In the semi idle case, use the nearest busy cpu for migrating timers 642 * from an idle cpu. This is good for power-savings. 643 * 644 * We don't do similar optimization for completely idle system, as 645 * selecting an idle cpu will add more delays to the timers than intended 646 * (as that cpu's timer base may not be uptodate wrt jiffies etc). 647 */ 648int get_nohz_timer_target(int pinned) 649{ 650 int cpu = smp_processor_id(); 651 int i; 652 struct sched_domain *sd; 653 654 if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu)) 655 return cpu; 656 657 rcu_read_lock(); 658 for_each_domain(cpu, sd) { 659 for_each_cpu(i, sched_domain_span(sd)) { 660 if (!idle_cpu(i)) { 661 cpu = i; 662 goto unlock; 663 } 664 } 665 } 666unlock: 667 rcu_read_unlock(); 668 return cpu; 669} 670/* 671 * When add_timer_on() enqueues a timer into the timer wheel of an 672 * idle CPU then this timer might expire before the next timer event 673 * which is scheduled to wake up that CPU. In case of a completely 674 * idle system the next event might even be infinite time into the 675 * future. wake_up_idle_cpu() ensures that the CPU is woken up and 676 * leaves the inner idle loop so the newly added timer is taken into 677 * account when the CPU goes back to idle and evaluates the timer 678 * wheel for the next timer event. 679 */ 680static void wake_up_idle_cpu(int cpu) 681{ 682 struct rq *rq = cpu_rq(cpu); 683 684 if (cpu == smp_processor_id()) 685 return; 686 687 if (set_nr_and_not_polling(rq->idle)) 688 smp_send_reschedule(cpu); 689 else 690 trace_sched_wake_idle_without_ipi(cpu); 691} 692 693static bool wake_up_full_nohz_cpu(int cpu) 694{ 695 /* 696 * We just need the target to call irq_exit() and re-evaluate 697 * the next tick. The nohz full kick at least implies that. 698 * If needed we can still optimize that later with an 699 * empty IRQ. 700 */ 701 if (tick_nohz_full_cpu(cpu)) { 702 if (cpu != smp_processor_id() || 703 tick_nohz_tick_stopped()) 704 tick_nohz_full_kick_cpu(cpu); 705 return true; 706 } 707 708 return false; 709} 710 711void wake_up_nohz_cpu(int cpu) 712{ 713 if (!wake_up_full_nohz_cpu(cpu)) 714 wake_up_idle_cpu(cpu); 715} 716 717static inline bool got_nohz_idle_kick(void) 718{ 719 int cpu = smp_processor_id(); 720 721 if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu))) 722 return false; 723 724 if (idle_cpu(cpu) && !need_resched()) 725 return true; 726 727 /* 728 * We can't run Idle Load Balance on this CPU for this time so we 729 * cancel it and clear NOHZ_BALANCE_KICK 730 */ 731 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)); 732 return false; 733} 734 735#else /* CONFIG_NO_HZ_COMMON */ 736 737static inline bool got_nohz_idle_kick(void) 738{ 739 return false; 740} 741 742#endif /* CONFIG_NO_HZ_COMMON */ 743 744#ifdef CONFIG_NO_HZ_FULL 745bool sched_can_stop_tick(void) 746{ 747 /* 748 * More than one running task need preemption. 749 * nr_running update is assumed to be visible 750 * after IPI is sent from wakers. 751 */ 752 if (this_rq()->nr_running > 1) 753 return false; 754 755 return true; 756} 757#endif /* CONFIG_NO_HZ_FULL */ 758 759void sched_avg_update(struct rq *rq) 760{ 761 s64 period = sched_avg_period(); 762 763 while ((s64)(rq_clock(rq) - rq->age_stamp) > period) { 764 /* 765 * Inline assembly required to prevent the compiler 766 * optimising this loop into a divmod call. 767 * See __iter_div_u64_rem() for another example of this. 768 */ 769 asm("" : "+rm" (rq->age_stamp)); 770 rq->age_stamp += period; 771 rq->rt_avg /= 2; 772 } 773} 774 775#endif /* CONFIG_SMP */ 776 777#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ 778 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH))) 779/* 780 * Iterate task_group tree rooted at *from, calling @down when first entering a 781 * node and @up when leaving it for the final time. 782 * 783 * Caller must hold rcu_lock or sufficient equivalent. 784 */ 785int walk_tg_tree_from(struct task_group *from, 786 tg_visitor down, tg_visitor up, void *data) 787{ 788 struct task_group *parent, *child; 789 int ret; 790 791 parent = from; 792 793down: 794 ret = (*down)(parent, data); 795 if (ret) 796 goto out; 797 list_for_each_entry_rcu(child, &parent->children, siblings) { 798 parent = child; 799 goto down; 800 801up: 802 continue; 803 } 804 ret = (*up)(parent, data); 805 if (ret || parent == from) 806 goto out; 807 808 child = parent; 809 parent = parent->parent; 810 if (parent) 811 goto up; 812out: 813 return ret; 814} 815 816int tg_nop(struct task_group *tg, void *data) 817{ 818 return 0; 819} 820#endif 821 822static void set_load_weight(struct task_struct *p) 823{ 824 int prio = p->static_prio - MAX_RT_PRIO; 825 struct load_weight *load = &p->se.load; 826 827 /* 828 * SCHED_IDLE tasks get minimal weight: 829 */ 830 if (p->policy == SCHED_IDLE) { 831 load->weight = scale_load(WEIGHT_IDLEPRIO); 832 load->inv_weight = WMULT_IDLEPRIO; 833 return; 834 } 835 836 load->weight = scale_load(prio_to_weight[prio]); 837 load->inv_weight = prio_to_wmult[prio]; 838} 839 840static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) 841{ 842 update_rq_clock(rq); 843 sched_info_queued(rq, p); 844 p->sched_class->enqueue_task(rq, p, flags); 845} 846 847static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) 848{ 849 update_rq_clock(rq); 850 sched_info_dequeued(rq, p); 851 p->sched_class->dequeue_task(rq, p, flags); 852} 853 854void activate_task(struct rq *rq, struct task_struct *p, int flags) 855{ 856 if (task_contributes_to_load(p)) 857 rq->nr_uninterruptible--; 858 859 enqueue_task(rq, p, flags); 860} 861 862void deactivate_task(struct rq *rq, struct task_struct *p, int flags) 863{ 864 if (task_contributes_to_load(p)) 865 rq->nr_uninterruptible++; 866 867 dequeue_task(rq, p, flags); 868} 869 870static void update_rq_clock_task(struct rq *rq, s64 delta) 871{ 872/* 873 * In theory, the compile should just see 0 here, and optimize out the call 874 * to sched_rt_avg_update. But I don't trust it... 875 */ 876#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) 877 s64 steal = 0, irq_delta = 0; 878#endif 879#ifdef CONFIG_IRQ_TIME_ACCOUNTING 880 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; 881 882 /* 883 * Since irq_time is only updated on {soft,}irq_exit, we might run into 884 * this case when a previous update_rq_clock() happened inside a 885 * {soft,}irq region. 886 * 887 * When this happens, we stop ->clock_task and only update the 888 * prev_irq_time stamp to account for the part that fit, so that a next 889 * update will consume the rest. This ensures ->clock_task is 890 * monotonic. 891 * 892 * It does however cause some slight miss-attribution of {soft,}irq 893 * time, a more accurate solution would be to update the irq_time using 894 * the current rq->clock timestamp, except that would require using 895 * atomic ops. 896 */ 897 if (irq_delta > delta) 898 irq_delta = delta; 899 900 rq->prev_irq_time += irq_delta; 901 delta -= irq_delta; 902#endif 903#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING 904 if (static_key_false((¶virt_steal_rq_enabled))) { 905 steal = paravirt_steal_clock(cpu_of(rq)); 906 steal -= rq->prev_steal_time_rq; 907 908 if (unlikely(steal > delta)) 909 steal = delta; 910 911 rq->prev_steal_time_rq += steal; 912 delta -= steal; 913 } 914#endif 915 916 rq->clock_task += delta; 917 918#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) 919 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY)) 920 sched_rt_avg_update(rq, irq_delta + steal); 921#endif 922} 923 924void sched_set_stop_task(int cpu, struct task_struct *stop) 925{ 926 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 927 struct task_struct *old_stop = cpu_rq(cpu)->stop; 928 929 if (stop) { 930 /* 931 * Make it appear like a SCHED_FIFO task, its something 932 * userspace knows about and won't get confused about. 933 * 934 * Also, it will make PI more or less work without too 935 * much confusion -- but then, stop work should not 936 * rely on PI working anyway. 937 */ 938 sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); 939 940 stop->sched_class = &stop_sched_class; 941 } 942 943 cpu_rq(cpu)->stop = stop; 944 945 if (old_stop) { 946 /* 947 * Reset it back to a normal scheduling class so that 948 * it can die in pieces. 949 */ 950 old_stop->sched_class = &rt_sched_class; 951 } 952} 953 954/* 955 * __normal_prio - return the priority that is based on the static prio 956 */ 957static inline int __normal_prio(struct task_struct *p) 958{ 959 return p->static_prio; 960} 961 962/* 963 * Calculate the expected normal priority: i.e. priority 964 * without taking RT-inheritance into account. Might be 965 * boosted by interactivity modifiers. Changes upon fork, 966 * setprio syscalls, and whenever the interactivity 967 * estimator recalculates. 968 */ 969static inline int normal_prio(struct task_struct *p) 970{ 971 int prio; 972 973 if (task_has_dl_policy(p)) 974 prio = MAX_DL_PRIO-1; 975 else if (task_has_rt_policy(p)) 976 prio = MAX_RT_PRIO-1 - p->rt_priority; 977 else 978 prio = __normal_prio(p); 979 return prio; 980} 981 982/* 983 * Calculate the current priority, i.e. the priority 984 * taken into account by the scheduler. This value might 985 * be boosted by RT tasks, or might be boosted by 986 * interactivity modifiers. Will be RT if the task got 987 * RT-boosted. If not then it returns p->normal_prio. 988 */ 989static int effective_prio(struct task_struct *p) 990{ 991 p->normal_prio = normal_prio(p); 992 /* 993 * If we are RT tasks or we were boosted to RT priority, 994 * keep the priority unchanged. Otherwise, update priority 995 * to the normal priority: 996 */ 997 if (!rt_prio(p->prio)) 998 return p->normal_prio; 999 return p->prio; 1000} 1001 1002/** 1003 * task_curr - is this task currently executing on a CPU? 1004 * @p: the task in question. 1005 * 1006 * Return: 1 if the task is currently executing. 0 otherwise. 1007 */ 1008inline int task_curr(const struct task_struct *p) 1009{ 1010 return cpu_curr(task_cpu(p)) == p; 1011} 1012 1013static inline void check_class_changed(struct rq *rq, struct task_struct *p, 1014 const struct sched_class *prev_class, 1015 int oldprio) 1016{ 1017 if (prev_class != p->sched_class) { 1018 if (prev_class->switched_from) 1019 prev_class->switched_from(rq, p); 1020 p->sched_class->switched_to(rq, p); 1021 } else if (oldprio != p->prio || dl_task(p)) 1022 p->sched_class->prio_changed(rq, p, oldprio); 1023} 1024 1025void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) 1026{ 1027 const struct sched_class *class; 1028 1029 if (p->sched_class == rq->curr->sched_class) { 1030 rq->curr->sched_class->check_preempt_curr(rq, p, flags); 1031 } else { 1032 for_each_class(class) { 1033 if (class == rq->curr->sched_class) 1034 break; 1035 if (class == p->sched_class) { 1036 resched_curr(rq); 1037 break; 1038 } 1039 } 1040 } 1041 1042 /* 1043 * A queue event has occurred, and we're going to schedule. In 1044 * this case, we can save a useless back to back clock update. 1045 */ 1046 if (rq->curr->on_rq && test_tsk_need_resched(rq->curr)) 1047 rq->skip_clock_update = 1; 1048} 1049 1050#ifdef CONFIG_SMP 1051void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 1052{ 1053#ifdef CONFIG_SCHED_DEBUG 1054 /* 1055 * We should never call set_task_cpu() on a blocked task, 1056 * ttwu() will sort out the placement. 1057 */ 1058 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && 1059 !(task_preempt_count(p) & PREEMPT_ACTIVE)); 1060 1061#ifdef CONFIG_LOCKDEP 1062 /* 1063 * The caller should hold either p->pi_lock or rq->lock, when changing 1064 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. 1065 * 1066 * sched_move_task() holds both and thus holding either pins the cgroup, 1067 * see task_group(). 1068 * 1069 * Furthermore, all task_rq users should acquire both locks, see 1070 * task_rq_lock(). 1071 */ 1072 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || 1073 lockdep_is_held(&task_rq(p)->lock))); 1074#endif 1075#endif 1076 1077 trace_sched_migrate_task(p, new_cpu); 1078 1079 if (task_cpu(p) != new_cpu) { 1080 if (p->sched_class->migrate_task_rq) 1081 p->sched_class->migrate_task_rq(p, new_cpu); 1082 p->se.nr_migrations++; 1083 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0); 1084 } 1085 1086 __set_task_cpu(p, new_cpu); 1087} 1088 1089static void __migrate_swap_task(struct task_struct *p, int cpu) 1090{ 1091 if (p->on_rq) { 1092 struct rq *src_rq, *dst_rq; 1093 1094 src_rq = task_rq(p); 1095 dst_rq = cpu_rq(cpu); 1096 1097 deactivate_task(src_rq, p, 0); 1098 set_task_cpu(p, cpu); 1099 activate_task(dst_rq, p, 0); 1100 check_preempt_curr(dst_rq, p, 0); 1101 } else { 1102 /* 1103 * Task isn't running anymore; make it appear like we migrated 1104 * it before it went to sleep. This means on wakeup we make the 1105 * previous cpu our targer instead of where it really is. 1106 */ 1107 p->wake_cpu = cpu; 1108 } 1109} 1110 1111struct migration_swap_arg { 1112 struct task_struct *src_task, *dst_task; 1113 int src_cpu, dst_cpu; 1114}; 1115 1116static int migrate_swap_stop(void *data) 1117{ 1118 struct migration_swap_arg *arg = data; 1119 struct rq *src_rq, *dst_rq; 1120 int ret = -EAGAIN; 1121 1122 src_rq = cpu_rq(arg->src_cpu); 1123 dst_rq = cpu_rq(arg->dst_cpu); 1124 1125 double_raw_lock(&arg->src_task->pi_lock, 1126 &arg->dst_task->pi_lock); 1127 double_rq_lock(src_rq, dst_rq); 1128 if (task_cpu(arg->dst_task) != arg->dst_cpu) 1129 goto unlock; 1130 1131 if (task_cpu(arg->src_task) != arg->src_cpu) 1132 goto unlock; 1133 1134 if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task))) 1135 goto unlock; 1136 1137 if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task))) 1138 goto unlock; 1139 1140 __migrate_swap_task(arg->src_task, arg->dst_cpu); 1141 __migrate_swap_task(arg->dst_task, arg->src_cpu); 1142 1143 ret = 0; 1144 1145unlock: 1146 double_rq_unlock(src_rq, dst_rq); 1147 raw_spin_unlock(&arg->dst_task->pi_lock); 1148 raw_spin_unlock(&arg->src_task->pi_lock); 1149 1150 return ret; 1151} 1152 1153/* 1154 * Cross migrate two tasks 1155 */ 1156int migrate_swap(struct task_struct *cur, struct task_struct *p) 1157{ 1158 struct migration_swap_arg arg; 1159 int ret = -EINVAL; 1160 1161 arg = (struct migration_swap_arg){ 1162 .src_task = cur, 1163 .src_cpu = task_cpu(cur), 1164 .dst_task = p, 1165 .dst_cpu = task_cpu(p), 1166 }; 1167 1168 if (arg.src_cpu == arg.dst_cpu) 1169 goto out; 1170 1171 /* 1172 * These three tests are all lockless; this is OK since all of them 1173 * will be re-checked with proper locks held further down the line. 1174 */ 1175 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) 1176 goto out; 1177 1178 if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task))) 1179 goto out; 1180 1181 if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task))) 1182 goto out; 1183 1184 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); 1185 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg); 1186 1187out: 1188 return ret; 1189} 1190 1191struct migration_arg { 1192 struct task_struct *task; 1193 int dest_cpu; 1194}; 1195 1196static int migration_cpu_stop(void *data); 1197 1198/* 1199 * wait_task_inactive - wait for a thread to unschedule. 1200 * 1201 * If @match_state is nonzero, it's the @p->state value just checked and 1202 * not expected to change. If it changes, i.e. @p might have woken up, 1203 * then return zero. When we succeed in waiting for @p to be off its CPU, 1204 * we return a positive number (its total switch count). If a second call 1205 * a short while later returns the same number, the caller can be sure that 1206 * @p has remained unscheduled the whole time. 1207 * 1208 * The caller must ensure that the task *will* unschedule sometime soon, 1209 * else this function might spin for a *long* time. This function can't 1210 * be called with interrupts off, or it may introduce deadlock with 1211 * smp_call_function() if an IPI is sent by the same process we are 1212 * waiting to become inactive. 1213 */ 1214unsigned long wait_task_inactive(struct task_struct *p, long match_state) 1215{ 1216 unsigned long flags; 1217 int running, on_rq; 1218 unsigned long ncsw; 1219 struct rq *rq; 1220 1221 for (;;) { 1222 /* 1223 * We do the initial early heuristics without holding 1224 * any task-queue locks at all. We'll only try to get 1225 * the runqueue lock when things look like they will 1226 * work out! 1227 */ 1228 rq = task_rq(p); 1229 1230 /* 1231 * If the task is actively running on another CPU 1232 * still, just relax and busy-wait without holding 1233 * any locks. 1234 * 1235 * NOTE! Since we don't hold any locks, it's not 1236 * even sure that "rq" stays as the right runqueue! 1237 * But we don't care, since "task_running()" will 1238 * return false if the runqueue has changed and p 1239 * is actually now running somewhere else! 1240 */ 1241 while (task_running(rq, p)) { 1242 if (match_state && unlikely(p->state != match_state)) 1243 return 0; 1244 cpu_relax(); 1245 } 1246 1247 /* 1248 * Ok, time to look more closely! We need the rq 1249 * lock now, to be *sure*. If we're wrong, we'll 1250 * just go back and repeat. 1251 */ 1252 rq = task_rq_lock(p, &flags); 1253 trace_sched_wait_task(p); 1254 running = task_running(rq, p); 1255 on_rq = p->on_rq; 1256 ncsw = 0; 1257 if (!match_state || p->state == match_state) 1258 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ 1259 task_rq_unlock(rq, p, &flags); 1260 1261 /* 1262 * If it changed from the expected state, bail out now. 1263 */ 1264 if (unlikely(!ncsw)) 1265 break; 1266 1267 /* 1268 * Was it really running after all now that we 1269 * checked with the proper locks actually held? 1270 * 1271 * Oops. Go back and try again.. 1272 */ 1273 if (unlikely(running)) { 1274 cpu_relax(); 1275 continue; 1276 } 1277 1278 /* 1279 * It's not enough that it's not actively running, 1280 * it must be off the runqueue _entirely_, and not 1281 * preempted! 1282 * 1283 * So if it was still runnable (but just not actively 1284 * running right now), it's preempted, and we should 1285 * yield - it could be a while. 1286 */ 1287 if (unlikely(on_rq)) { 1288 ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ); 1289 1290 set_current_state(TASK_UNINTERRUPTIBLE); 1291 schedule_hrtimeout(&to, HRTIMER_MODE_REL); 1292 continue; 1293 } 1294 1295 /* 1296 * Ahh, all good. It wasn't running, and it wasn't 1297 * runnable, which means that it will never become 1298 * running in the future either. We're all done! 1299 */ 1300 break; 1301 } 1302 1303 return ncsw; 1304} 1305 1306/*** 1307 * kick_process - kick a running thread to enter/exit the kernel 1308 * @p: the to-be-kicked thread 1309 * 1310 * Cause a process which is running on another CPU to enter 1311 * kernel-mode, without any delay. (to get signals handled.) 1312 * 1313 * NOTE: this function doesn't have to take the runqueue lock, 1314 * because all it wants to ensure is that the remote task enters 1315 * the kernel. If the IPI races and the task has been migrated 1316 * to another CPU then no harm is done and the purpose has been 1317 * achieved as well. 1318 */ 1319void kick_process(struct task_struct *p) 1320{ 1321 int cpu; 1322 1323 preempt_disable(); 1324 cpu = task_cpu(p); 1325 if ((cpu != smp_processor_id()) && task_curr(p)) 1326 smp_send_reschedule(cpu); 1327 preempt_enable(); 1328} 1329EXPORT_SYMBOL_GPL(kick_process); 1330#endif /* CONFIG_SMP */ 1331 1332#ifdef CONFIG_SMP 1333/* 1334 * ->cpus_allowed is protected by both rq->lock and p->pi_lock 1335 */ 1336static int select_fallback_rq(int cpu, struct task_struct *p) 1337{ 1338 int nid = cpu_to_node(cpu); 1339 const struct cpumask *nodemask = NULL; 1340 enum { cpuset, possible, fail } state = cpuset; 1341 int dest_cpu; 1342 1343 /* 1344 * If the node that the cpu is on has been offlined, cpu_to_node() 1345 * will return -1. There is no cpu on the node, and we should 1346 * select the cpu on the other node. 1347 */ 1348 if (nid != -1) { 1349 nodemask = cpumask_of_node(nid); 1350 1351 /* Look for allowed, online CPU in same node. */ 1352 for_each_cpu(dest_cpu, nodemask) { 1353 if (!cpu_online(dest_cpu)) 1354 continue; 1355 if (!cpu_active(dest_cpu)) 1356 continue; 1357 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) 1358 return dest_cpu; 1359 } 1360 } 1361 1362 for (;;) { 1363 /* Any allowed, online CPU? */ 1364 for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) { 1365 if (!cpu_online(dest_cpu)) 1366 continue; 1367 if (!cpu_active(dest_cpu)) 1368 continue; 1369 goto out; 1370 } 1371 1372 switch (state) { 1373 case cpuset: 1374 /* No more Mr. Nice Guy. */ 1375 cpuset_cpus_allowed_fallback(p); 1376 state = possible; 1377 break; 1378 1379 case possible: 1380 do_set_cpus_allowed(p, cpu_possible_mask); 1381 state = fail; 1382 break; 1383 1384 case fail: 1385 BUG(); 1386 break; 1387 } 1388 } 1389 1390out: 1391 if (state != cpuset) { 1392 /* 1393 * Don't tell them about moving exiting tasks or 1394 * kernel threads (both mm NULL), since they never 1395 * leave kernel. 1396 */ 1397 if (p->mm && printk_ratelimit()) { 1398 printk_deferred("process %d (%s) no longer affine to cpu%d\n", 1399 task_pid_nr(p), p->comm, cpu); 1400 } 1401 } 1402 1403 return dest_cpu; 1404} 1405 1406/* 1407 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable. 1408 */ 1409static inline 1410int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) 1411{ 1412 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); 1413 1414 /* 1415 * In order not to call set_task_cpu() on a blocking task we need 1416 * to rely on ttwu() to place the task on a valid ->cpus_allowed 1417 * cpu. 1418 * 1419 * Since this is common to all placement strategies, this lives here. 1420 * 1421 * [ this allows ->select_task() to simply return task_cpu(p) and 1422 * not worry about this generic constraint ] 1423 */ 1424 if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) || 1425 !cpu_online(cpu))) 1426 cpu = select_fallback_rq(task_cpu(p), p); 1427 1428 return cpu; 1429} 1430 1431static void update_avg(u64 *avg, u64 sample) 1432{ 1433 s64 diff = sample - *avg; 1434 *avg += diff >> 3; 1435} 1436#endif 1437 1438static void 1439ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 1440{ 1441#ifdef CONFIG_SCHEDSTATS 1442 struct rq *rq = this_rq(); 1443 1444#ifdef CONFIG_SMP 1445 int this_cpu = smp_processor_id(); 1446 1447 if (cpu == this_cpu) { 1448 schedstat_inc(rq, ttwu_local); 1449 schedstat_inc(p, se.statistics.nr_wakeups_local); 1450 } else { 1451 struct sched_domain *sd; 1452 1453 schedstat_inc(p, se.statistics.nr_wakeups_remote); 1454 rcu_read_lock(); 1455 for_each_domain(this_cpu, sd) { 1456 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 1457 schedstat_inc(sd, ttwu_wake_remote); 1458 break; 1459 } 1460 } 1461 rcu_read_unlock(); 1462 } 1463 1464 if (wake_flags & WF_MIGRATED) 1465 schedstat_inc(p, se.statistics.nr_wakeups_migrate); 1466 1467#endif /* CONFIG_SMP */ 1468 1469 schedstat_inc(rq, ttwu_count); 1470 schedstat_inc(p, se.statistics.nr_wakeups); 1471 1472 if (wake_flags & WF_SYNC) 1473 schedstat_inc(p, se.statistics.nr_wakeups_sync); 1474 1475#endif /* CONFIG_SCHEDSTATS */ 1476} 1477 1478static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) 1479{ 1480 activate_task(rq, p, en_flags); 1481 p->on_rq = 1; 1482 1483 /* if a worker is waking up, notify workqueue */ 1484 if (p->flags & PF_WQ_WORKER) 1485 wq_worker_waking_up(p, cpu_of(rq)); 1486} 1487 1488/* 1489 * Mark the task runnable and perform wakeup-preemption. 1490 */ 1491static void 1492ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) 1493{ 1494 check_preempt_curr(rq, p, wake_flags); 1495 trace_sched_wakeup(p, true); 1496 1497 p->state = TASK_RUNNING; 1498#ifdef CONFIG_SMP 1499 if (p->sched_class->task_woken) 1500 p->sched_class->task_woken(rq, p); 1501 1502 if (rq->idle_stamp) { 1503 u64 delta = rq_clock(rq) - rq->idle_stamp; 1504 u64 max = 2*rq->max_idle_balance_cost; 1505 1506 update_avg(&rq->avg_idle, delta); 1507 1508 if (rq->avg_idle > max) 1509 rq->avg_idle = max; 1510 1511 rq->idle_stamp = 0; 1512 } 1513#endif 1514} 1515 1516static void 1517ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) 1518{ 1519#ifdef CONFIG_SMP 1520 if (p->sched_contributes_to_load) 1521 rq->nr_uninterruptible--; 1522#endif 1523 1524 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING); 1525 ttwu_do_wakeup(rq, p, wake_flags); 1526} 1527 1528/* 1529 * Called in case the task @p isn't fully descheduled from its runqueue, 1530 * in this case we must do a remote wakeup. Its a 'light' wakeup though, 1531 * since all we need to do is flip p->state to TASK_RUNNING, since 1532 * the task is still ->on_rq. 1533 */ 1534static int ttwu_remote(struct task_struct *p, int wake_flags) 1535{ 1536 struct rq *rq; 1537 int ret = 0; 1538 1539 rq = __task_rq_lock(p); 1540 if (p->on_rq) { 1541 /* check_preempt_curr() may use rq clock */ 1542 update_rq_clock(rq); 1543 ttwu_do_wakeup(rq, p, wake_flags); 1544 ret = 1; 1545 } 1546 __task_rq_unlock(rq); 1547 1548 return ret; 1549} 1550 1551#ifdef CONFIG_SMP 1552void sched_ttwu_pending(void) 1553{ 1554 struct rq *rq = this_rq(); 1555 struct llist_node *llist = llist_del_all(&rq->wake_list); 1556 struct task_struct *p; 1557 unsigned long flags; 1558 1559 if (!llist) 1560 return; 1561 1562 raw_spin_lock_irqsave(&rq->lock, flags); 1563 1564 while (llist) { 1565 p = llist_entry(llist, struct task_struct, wake_entry); 1566 llist = llist_next(llist); 1567 ttwu_do_activate(rq, p, 0); 1568 } 1569 1570 raw_spin_unlock_irqrestore(&rq->lock, flags); 1571} 1572 1573void scheduler_ipi(void) 1574{ 1575 /* 1576 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting 1577 * TIF_NEED_RESCHED remotely (for the first time) will also send 1578 * this IPI. 1579 */ 1580 preempt_fold_need_resched(); 1581 1582 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()) 1583 return; 1584 1585 /* 1586 * Not all reschedule IPI handlers call irq_enter/irq_exit, since 1587 * traditionally all their work was done from the interrupt return 1588 * path. Now that we actually do some work, we need to make sure 1589 * we do call them. 1590 * 1591 * Some archs already do call them, luckily irq_enter/exit nest 1592 * properly. 1593 * 1594 * Arguably we should visit all archs and update all handlers, 1595 * however a fair share of IPIs are still resched only so this would 1596 * somewhat pessimize the simple resched case. 1597 */ 1598 irq_enter(); 1599 sched_ttwu_pending(); 1600 1601 /* 1602 * Check if someone kicked us for doing the nohz idle load balance. 1603 */ 1604 if (unlikely(got_nohz_idle_kick())) { 1605 this_rq()->idle_balance = 1; 1606 raise_softirq_irqoff(SCHED_SOFTIRQ); 1607 } 1608 irq_exit(); 1609} 1610 1611static void ttwu_queue_remote(struct task_struct *p, int cpu) 1612{ 1613 struct rq *rq = cpu_rq(cpu); 1614 1615 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) { 1616 if (!set_nr_if_polling(rq->idle)) 1617 smp_send_reschedule(cpu); 1618 else 1619 trace_sched_wake_idle_without_ipi(cpu); 1620 } 1621} 1622 1623bool cpus_share_cache(int this_cpu, int that_cpu) 1624{ 1625 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); 1626} 1627#endif /* CONFIG_SMP */ 1628 1629static void ttwu_queue(struct task_struct *p, int cpu) 1630{ 1631 struct rq *rq = cpu_rq(cpu); 1632 1633#if defined(CONFIG_SMP) 1634 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) { 1635 sched_clock_cpu(cpu); /* sync clocks x-cpu */ 1636 ttwu_queue_remote(p, cpu); 1637 return; 1638 } 1639#endif 1640 1641 raw_spin_lock(&rq->lock); 1642 ttwu_do_activate(rq, p, 0); 1643 raw_spin_unlock(&rq->lock); 1644} 1645 1646/** 1647 * try_to_wake_up - wake up a thread 1648 * @p: the thread to be awakened 1649 * @state: the mask of task states that can be woken 1650 * @wake_flags: wake modifier flags (WF_*) 1651 * 1652 * Put it on the run-queue if it's not already there. The "current" 1653 * thread is always on the run-queue (except when the actual 1654 * re-schedule is in progress), and as such you're allowed to do 1655 * the simpler "current->state = TASK_RUNNING" to mark yourself 1656 * runnable without the overhead of this. 1657 * 1658 * Return: %true if @p was woken up, %false if it was already running. 1659 * or @state didn't match @p's state. 1660 */ 1661static int 1662try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) 1663{ 1664 unsigned long flags; 1665 int cpu, success = 0; 1666 1667 /* 1668 * If we are going to wake up a thread waiting for CONDITION we 1669 * need to ensure that CONDITION=1 done by the caller can not be 1670 * reordered with p->state check below. This pairs with mb() in 1671 * set_current_state() the waiting thread does. 1672 */ 1673 smp_mb__before_spinlock(); 1674 raw_spin_lock_irqsave(&p->pi_lock, flags); 1675 if (!(p->state & state)) 1676 goto out; 1677 1678 success = 1; /* we're going to change ->state */ 1679 cpu = task_cpu(p); 1680 1681 if (p->on_rq && ttwu_remote(p, wake_flags)) 1682 goto stat; 1683 1684#ifdef CONFIG_SMP 1685 /* 1686 * If the owning (remote) cpu is still in the middle of schedule() with 1687 * this task as prev, wait until its done referencing the task. 1688 */ 1689 while (p->on_cpu) 1690 cpu_relax(); 1691 /* 1692 * Pairs with the smp_wmb() in finish_lock_switch(). 1693 */ 1694 smp_rmb(); 1695 1696 p->sched_contributes_to_load = !!task_contributes_to_load(p); 1697 p->state = TASK_WAKING; 1698 1699 if (p->sched_class->task_waking) 1700 p->sched_class->task_waking(p); 1701 1702 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags); 1703 if (task_cpu(p) != cpu) { 1704 wake_flags |= WF_MIGRATED; 1705 set_task_cpu(p, cpu); 1706 } 1707#endif /* CONFIG_SMP */ 1708 1709 ttwu_queue(p, cpu); 1710stat: 1711 ttwu_stat(p, cpu, wake_flags); 1712out: 1713 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 1714 1715 return success; 1716} 1717 1718/** 1719 * try_to_wake_up_local - try to wake up a local task with rq lock held 1720 * @p: the thread to be awakened 1721 * 1722 * Put @p on the run-queue if it's not already there. The caller must 1723 * ensure that this_rq() is locked, @p is bound to this_rq() and not 1724 * the current task. 1725 */ 1726static void try_to_wake_up_local(struct task_struct *p) 1727{ 1728 struct rq *rq = task_rq(p); 1729 1730 if (WARN_ON_ONCE(rq != this_rq()) || 1731 WARN_ON_ONCE(p == current)) 1732 return; 1733 1734 lockdep_assert_held(&rq->lock); 1735 1736 if (!raw_spin_trylock(&p->pi_lock)) { 1737 raw_spin_unlock(&rq->lock); 1738 raw_spin_lock(&p->pi_lock); 1739 raw_spin_lock(&rq->lock); 1740 } 1741 1742 if (!(p->state & TASK_NORMAL)) 1743 goto out; 1744 1745 if (!p->on_rq) 1746 ttwu_activate(rq, p, ENQUEUE_WAKEUP); 1747 1748 ttwu_do_wakeup(rq, p, 0); 1749 ttwu_stat(p, smp_processor_id(), 0); 1750out: 1751 raw_spin_unlock(&p->pi_lock); 1752} 1753 1754/** 1755 * wake_up_process - Wake up a specific process 1756 * @p: The process to be woken up. 1757 * 1758 * Attempt to wake up the nominated process and move it to the set of runnable 1759 * processes. 1760 * 1761 * Return: 1 if the process was woken up, 0 if it was already running. 1762 * 1763 * It may be assumed that this function implies a write memory barrier before 1764 * changing the task state if and only if any tasks are woken up. 1765 */ 1766int wake_up_process(struct task_struct *p) 1767{ 1768 WARN_ON(task_is_stopped_or_traced(p)); 1769 return try_to_wake_up(p, TASK_NORMAL, 0); 1770} 1771EXPORT_SYMBOL(wake_up_process); 1772 1773int wake_up_state(struct task_struct *p, unsigned int state) 1774{ 1775 return try_to_wake_up(p, state, 0); 1776} 1777 1778/* 1779 * Perform scheduler related setup for a newly forked process p. 1780 * p is forked by current. 1781 * 1782 * __sched_fork() is basic setup used by init_idle() too: 1783 */ 1784static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 1785{ 1786 p->on_rq = 0; 1787 1788 p->se.on_rq = 0; 1789 p->se.exec_start = 0; 1790 p->se.sum_exec_runtime = 0; 1791 p->se.prev_sum_exec_runtime = 0; 1792 p->se.nr_migrations = 0; 1793 p->se.vruntime = 0; 1794 INIT_LIST_HEAD(&p->se.group_node); 1795 1796#ifdef CONFIG_SCHEDSTATS 1797 memset(&p->se.statistics, 0, sizeof(p->se.statistics)); 1798#endif 1799 1800 RB_CLEAR_NODE(&p->dl.rb_node); 1801 hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1802 p->dl.dl_runtime = p->dl.runtime = 0; 1803 p->dl.dl_deadline = p->dl.deadline = 0; 1804 p->dl.dl_period = 0; 1805 p->dl.flags = 0; 1806 1807 INIT_LIST_HEAD(&p->rt.run_list); 1808 1809#ifdef CONFIG_PREEMPT_NOTIFIERS 1810 INIT_HLIST_HEAD(&p->preempt_notifiers); 1811#endif 1812 1813#ifdef CONFIG_NUMA_BALANCING 1814 if (p->mm && atomic_read(&p->mm->mm_users) == 1) { 1815 p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); 1816 p->mm->numa_scan_seq = 0; 1817 } 1818 1819 if (clone_flags & CLONE_VM) 1820 p->numa_preferred_nid = current->numa_preferred_nid; 1821 else 1822 p->numa_preferred_nid = -1; 1823 1824 p->node_stamp = 0ULL; 1825 p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; 1826 p->numa_scan_period = sysctl_numa_balancing_scan_delay; 1827 p->numa_work.next = &p->numa_work; 1828 p->numa_faults_memory = NULL; 1829 p->numa_faults_buffer_memory = NULL; 1830 p->last_task_numa_placement = 0; 1831 p->last_sum_exec_runtime = 0; 1832 1833 INIT_LIST_HEAD(&p->numa_entry); 1834 p->numa_group = NULL; 1835#endif /* CONFIG_NUMA_BALANCING */ 1836} 1837 1838#ifdef CONFIG_NUMA_BALANCING 1839#ifdef CONFIG_SCHED_DEBUG 1840void set_numabalancing_state(bool enabled) 1841{ 1842 if (enabled) 1843 sched_feat_set("NUMA"); 1844 else 1845 sched_feat_set("NO_NUMA"); 1846} 1847#else 1848__read_mostly bool numabalancing_enabled; 1849 1850void set_numabalancing_state(bool enabled) 1851{ 1852 numabalancing_enabled = enabled; 1853} 1854#endif /* CONFIG_SCHED_DEBUG */ 1855 1856#ifdef CONFIG_PROC_SYSCTL 1857int sysctl_numa_balancing(struct ctl_table *table, int write, 1858 void __user *buffer, size_t *lenp, loff_t *ppos) 1859{ 1860 struct ctl_table t; 1861 int err; 1862 int state = numabalancing_enabled; 1863 1864 if (write && !capable(CAP_SYS_ADMIN)) 1865 return -EPERM; 1866 1867 t = *table; 1868 t.data = &state; 1869 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 1870 if (err < 0) 1871 return err; 1872 if (write) 1873 set_numabalancing_state(state); 1874 return err; 1875} 1876#endif 1877#endif 1878 1879/* 1880 * fork()/clone()-time setup: 1881 */ 1882int sched_fork(unsigned long clone_flags, struct task_struct *p) 1883{ 1884 unsigned long flags; 1885 int cpu = get_cpu(); 1886 1887 __sched_fork(clone_flags, p); 1888 /* 1889 * We mark the process as running here. This guarantees that 1890 * nobody will actually run it, and a signal or other external 1891 * event cannot wake it up and insert it on the runqueue either. 1892 */ 1893 p->state = TASK_RUNNING; 1894 1895 /* 1896 * Make sure we do not leak PI boosting priority to the child. 1897 */ 1898 p->prio = current->normal_prio; 1899 1900 /* 1901 * Revert to default priority/policy on fork if requested. 1902 */ 1903 if (unlikely(p->sched_reset_on_fork)) { 1904 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 1905 p->policy = SCHED_NORMAL; 1906 p->static_prio = NICE_TO_PRIO(0); 1907 p->rt_priority = 0; 1908 } else if (PRIO_TO_NICE(p->static_prio) < 0) 1909 p->static_prio = NICE_TO_PRIO(0); 1910 1911 p->prio = p->normal_prio = __normal_prio(p); 1912 set_load_weight(p); 1913 1914 /* 1915 * We don't need the reset flag anymore after the fork. It has 1916 * fulfilled its duty: 1917 */ 1918 p->sched_reset_on_fork = 0; 1919 } 1920 1921 if (dl_prio(p->prio)) { 1922 put_cpu(); 1923 return -EAGAIN; 1924 } else if (rt_prio(p->prio)) { 1925 p->sched_class = &rt_sched_class; 1926 } else { 1927 p->sched_class = &fair_sched_class; 1928 } 1929 1930 if (p->sched_class->task_fork) 1931 p->sched_class->task_fork(p); 1932 1933 /* 1934 * The child is not yet in the pid-hash so no cgroup attach races, 1935 * and the cgroup is pinned to this child due to cgroup_fork() 1936 * is ran before sched_fork(). 1937 * 1938 * Silence PROVE_RCU. 1939 */ 1940 raw_spin_lock_irqsave(&p->pi_lock, flags); 1941 set_task_cpu(p, cpu); 1942 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 1943 1944#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 1945 if (likely(sched_info_on())) 1946 memset(&p->sched_info, 0, sizeof(p->sched_info)); 1947#endif 1948#if defined(CONFIG_SMP) 1949 p->on_cpu = 0; 1950#endif 1951 init_task_preempt_count(p); 1952#ifdef CONFIG_SMP 1953 plist_node_init(&p->pushable_tasks, MAX_PRIO); 1954 RB_CLEAR_NODE(&p->pushable_dl_tasks); 1955#endif 1956 1957 put_cpu(); 1958 return 0; 1959} 1960 1961unsigned long to_ratio(u64 period, u64 runtime) 1962{ 1963 if (runtime == RUNTIME_INF) 1964 return 1ULL << 20; 1965 1966 /* 1967 * Doing this here saves a lot of checks in all 1968 * the calling paths, and returning zero seems 1969 * safe for them anyway. 1970 */ 1971 if (period == 0) 1972 return 0; 1973 1974 return div64_u64(runtime << 20, period); 1975} 1976 1977#ifdef CONFIG_SMP 1978inline struct dl_bw *dl_bw_of(int i) 1979{ 1980 return &cpu_rq(i)->rd->dl_bw; 1981} 1982 1983static inline int dl_bw_cpus(int i) 1984{ 1985 struct root_domain *rd = cpu_rq(i)->rd; 1986 int cpus = 0; 1987 1988 for_each_cpu_and(i, rd->span, cpu_active_mask) 1989 cpus++; 1990 1991 return cpus; 1992} 1993#else 1994inline struct dl_bw *dl_bw_of(int i) 1995{ 1996 return &cpu_rq(i)->dl.dl_bw; 1997} 1998 1999static inline int dl_bw_cpus(int i) 2000{ 2001 return 1; 2002} 2003#endif 2004 2005static inline 2006void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw) 2007{ 2008 dl_b->total_bw -= tsk_bw; 2009} 2010 2011static inline 2012void __dl_add(struct dl_bw *dl_b, u64 tsk_bw) 2013{ 2014 dl_b->total_bw += tsk_bw; 2015} 2016 2017static inline 2018bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw) 2019{ 2020 return dl_b->bw != -1 && 2021 dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw; 2022} 2023 2024/* 2025 * We must be sure that accepting a new task (or allowing changing the 2026 * parameters of an existing one) is consistent with the bandwidth 2027 * constraints. If yes, this function also accordingly updates the currently 2028 * allocated bandwidth to reflect the new situation. 2029 * 2030 * This function is called while holding p's rq->lock. 2031 */ 2032static int dl_overflow(struct task_struct *p, int policy, 2033 const struct sched_attr *attr) 2034{ 2035 2036 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 2037 u64 period = attr->sched_period ?: attr->sched_deadline; 2038 u64 runtime = attr->sched_runtime; 2039 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; 2040 int cpus, err = -1; 2041 2042 if (new_bw == p->dl.dl_bw) 2043 return 0; 2044 2045 /* 2046 * Either if a task, enters, leave, or stays -deadline but changes 2047 * its parameters, we may need to update accordingly the total 2048 * allocated bandwidth of the container. 2049 */ 2050 raw_spin_lock(&dl_b->lock); 2051 cpus = dl_bw_cpus(task_cpu(p)); 2052 if (dl_policy(policy) && !task_has_dl_policy(p) && 2053 !__dl_overflow(dl_b, cpus, 0, new_bw)) { 2054 __dl_add(dl_b, new_bw); 2055 err = 0; 2056 } else if (dl_policy(policy) && task_has_dl_policy(p) && 2057 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) { 2058 __dl_clear(dl_b, p->dl.dl_bw); 2059 __dl_add(dl_b, new_bw); 2060 err = 0; 2061 } else if (!dl_policy(policy) && task_has_dl_policy(p)) { 2062 __dl_clear(dl_b, p->dl.dl_bw); 2063 err = 0; 2064 } 2065 raw_spin_unlock(&dl_b->lock); 2066 2067 return err; 2068} 2069 2070extern void init_dl_bw(struct dl_bw *dl_b); 2071 2072/* 2073 * wake_up_new_task - wake up a newly created task for the first time. 2074 * 2075 * This function will do some initial scheduler statistics housekeeping 2076 * that must be done for every newly created context, then puts the task 2077 * on the runqueue and wakes it. 2078 */ 2079void wake_up_new_task(struct task_struct *p) 2080{ 2081 unsigned long flags; 2082 struct rq *rq; 2083 2084 raw_spin_lock_irqsave(&p->pi_lock, flags); 2085#ifdef CONFIG_SMP 2086 /* 2087 * Fork balancing, do it here and not earlier because: 2088 * - cpus_allowed can change in the fork path 2089 * - any previously selected cpu might disappear through hotplug 2090 */ 2091 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); 2092#endif 2093 2094 /* Initialize new task's runnable average */ 2095 init_task_runnable_average(p); 2096 rq = __task_rq_lock(p); 2097 activate_task(rq, p, 0); 2098 p->on_rq = 1; 2099 trace_sched_wakeup_new(p, true); 2100 check_preempt_curr(rq, p, WF_FORK); 2101#ifdef CONFIG_SMP 2102 if (p->sched_class->task_woken) 2103 p->sched_class->task_woken(rq, p); 2104#endif 2105 task_rq_unlock(rq, p, &flags); 2106} 2107 2108#ifdef CONFIG_PREEMPT_NOTIFIERS 2109 2110/** 2111 * preempt_notifier_register - tell me when current is being preempted & rescheduled 2112 * @notifier: notifier struct to register 2113 */ 2114void preempt_notifier_register(struct preempt_notifier *notifier) 2115{ 2116 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); 2117} 2118EXPORT_SYMBOL_GPL(preempt_notifier_register); 2119 2120/** 2121 * preempt_notifier_unregister - no longer interested in preemption notifications 2122 * @notifier: notifier struct to unregister 2123 * 2124 * This is safe to call from within a preemption notifier. 2125 */ 2126void preempt_notifier_unregister(struct preempt_notifier *notifier) 2127{ 2128 hlist_del(¬ifier->link); 2129} 2130EXPORT_SYMBOL_GPL(preempt_notifier_unregister); 2131 2132static void fire_sched_in_preempt_notifiers(struct task_struct *curr) 2133{ 2134 struct preempt_notifier *notifier; 2135 2136 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 2137 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 2138} 2139 2140static void 2141fire_sched_out_preempt_notifiers(struct task_struct *curr, 2142 struct task_struct *next) 2143{ 2144 struct preempt_notifier *notifier; 2145 2146 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 2147 notifier->ops->sched_out(notifier, next); 2148} 2149 2150#else /* !CONFIG_PREEMPT_NOTIFIERS */ 2151 2152static void fire_sched_in_preempt_notifiers(struct task_struct *curr) 2153{ 2154} 2155 2156static void 2157fire_sched_out_preempt_notifiers(struct task_struct *curr, 2158 struct task_struct *next) 2159{ 2160} 2161 2162#endif /* CONFIG_PREEMPT_NOTIFIERS */ 2163 2164/** 2165 * prepare_task_switch - prepare to switch tasks 2166 * @rq: the runqueue preparing to switch 2167 * @prev: the current task that is being switched out 2168 * @next: the task we are going to switch to. 2169 * 2170 * This is called with the rq lock held and interrupts off. It must 2171 * be paired with a subsequent finish_task_switch after the context 2172 * switch. 2173 * 2174 * prepare_task_switch sets up locking and calls architecture specific 2175 * hooks. 2176 */ 2177static inline void 2178prepare_task_switch(struct rq *rq, struct task_struct *prev, 2179 struct task_struct *next) 2180{ 2181 trace_sched_switch(prev, next); 2182 sched_info_switch(rq, prev, next); 2183 perf_event_task_sched_out(prev, next); 2184 fire_sched_out_preempt_notifiers(prev, next); 2185 prepare_lock_switch(rq, next); 2186 prepare_arch_switch(next); 2187} 2188 2189/** 2190 * finish_task_switch - clean up after a task-switch 2191 * @rq: runqueue associated with task-switch 2192 * @prev: the thread we just switched away from. 2193 * 2194 * finish_task_switch must be called after the context switch, paired 2195 * with a prepare_task_switch call before the context switch. 2196 * finish_task_switch will reconcile locking set up by prepare_task_switch, 2197 * and do any other architecture-specific cleanup actions. 2198 * 2199 * Note that we may have delayed dropping an mm in context_switch(). If 2200 * so, we finish that here outside of the runqueue lock. (Doing it 2201 * with the lock held can cause deadlocks; see schedule() for 2202 * details.) 2203 */ 2204static void finish_task_switch(struct rq *rq, struct task_struct *prev) 2205 __releases(rq->lock) 2206{ 2207 struct mm_struct *mm = rq->prev_mm; 2208 long prev_state; 2209 2210 rq->prev_mm = NULL; 2211 2212 /* 2213 * A task struct has one reference for the use as "current". 2214 * If a task dies, then it sets TASK_DEAD in tsk->state and calls 2215 * schedule one last time. The schedule call will never return, and 2216 * the scheduled task must drop that reference. 2217 * The test for TASK_DEAD must occur while the runqueue locks are 2218 * still held, otherwise prev could be scheduled on another cpu, die 2219 * there before we look at prev->state, and then the reference would 2220 * be dropped twice. 2221 * Manfred Spraul <manfred@colorfullife.com> 2222 */ 2223 prev_state = prev->state; 2224 vtime_task_switch(prev); 2225 finish_arch_switch(prev); 2226 perf_event_task_sched_in(prev, current); 2227 finish_lock_switch(rq, prev); 2228 finish_arch_post_lock_switch(); 2229 2230 fire_sched_in_preempt_notifiers(current); 2231 if (mm) 2232 mmdrop(mm); 2233 if (unlikely(prev_state == TASK_DEAD)) { 2234 if (prev->sched_class->task_dead) 2235 prev->sched_class->task_dead(prev); 2236 2237 /* 2238 * Remove function-return probe instances associated with this 2239 * task and put them back on the free list. 2240 */ 2241 kprobe_flush_task(prev); 2242 put_task_struct(prev); 2243 } 2244 2245 tick_nohz_task_switch(current); 2246} 2247 2248#ifdef CONFIG_SMP 2249 2250/* rq->lock is NOT held, but preemption is disabled */ 2251static inline void post_schedule(struct rq *rq) 2252{ 2253 if (rq->post_schedule) { 2254 unsigned long flags; 2255 2256 raw_spin_lock_irqsave(&rq->lock, flags); 2257 if (rq->curr->sched_class->post_schedule) 2258 rq->curr->sched_class->post_schedule(rq); 2259 raw_spin_unlock_irqrestore(&rq->lock, flags); 2260 2261 rq->post_schedule = 0; 2262 } 2263} 2264 2265#else 2266 2267static inline void post_schedule(struct rq *rq) 2268{ 2269} 2270 2271#endif 2272 2273/** 2274 * schedule_tail - first thing a freshly forked thread must call. 2275 * @prev: the thread we just switched away from. 2276 */ 2277asmlinkage __visible void schedule_tail(struct task_struct *prev) 2278 __releases(rq->lock) 2279{ 2280 struct rq *rq = this_rq(); 2281 2282 finish_task_switch(rq, prev); 2283 2284 /* 2285 * FIXME: do we need to worry about rq being invalidated by the 2286 * task_switch? 2287 */ 2288 post_schedule(rq); 2289 2290#ifdef __ARCH_WANT_UNLOCKED_CTXSW 2291 /* In this case, finish_task_switch does not reenable preemption */ 2292 preempt_enable(); 2293#endif 2294 if (current->set_child_tid) 2295 put_user(task_pid_vnr(current), current->set_child_tid); 2296} 2297 2298/* 2299 * context_switch - switch to the new MM and the new 2300 * thread's register state. 2301 */ 2302static inline void 2303context_switch(struct rq *rq, struct task_struct *prev, 2304 struct task_struct *next) 2305{ 2306 struct mm_struct *mm, *oldmm; 2307 2308 prepare_task_switch(rq, prev, next); 2309 2310 mm = next->mm; 2311 oldmm = prev->active_mm; 2312 /* 2313 * For paravirt, this is coupled with an exit in switch_to to 2314 * combine the page table reload and the switch backend into 2315 * one hypercall. 2316 */ 2317 arch_start_context_switch(prev); 2318 2319 if (!mm) { 2320 next->active_mm = oldmm; 2321 atomic_inc(&oldmm->mm_count); 2322 enter_lazy_tlb(oldmm, next); 2323 } else 2324 switch_mm(oldmm, mm, next); 2325 2326 if (!prev->mm) { 2327 prev->active_mm = NULL; 2328 rq->prev_mm = oldmm; 2329 } 2330 /* 2331 * Since the runqueue lock will be released by the next 2332 * task (which is an invalid locking op but in the case 2333 * of the scheduler it's an obvious special-case), so we 2334 * do an early lockdep release here: 2335 */ 2336#ifndef __ARCH_WANT_UNLOCKED_CTXSW 2337 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 2338#endif 2339 2340 context_tracking_task_switch(prev, next); 2341 /* Here we just switch the register state and the stack. */ 2342 switch_to(prev, next, prev); 2343 2344 barrier(); 2345 /* 2346 * this_rq must be evaluated again because prev may have moved 2347 * CPUs since it called schedule(), thus the 'rq' on its stack 2348 * frame will be invalid. 2349 */ 2350 finish_task_switch(this_rq(), prev); 2351} 2352 2353/* 2354 * nr_running and nr_context_switches: 2355 * 2356 * externally visible scheduler statistics: current number of runnable 2357 * threads, total number of context switches performed since bootup. 2358 */ 2359unsigned long nr_running(void) 2360{ 2361 unsigned long i, sum = 0; 2362 2363 for_each_online_cpu(i) 2364 sum += cpu_rq(i)->nr_running; 2365 2366 return sum; 2367} 2368 2369unsigned long long nr_context_switches(void) 2370{ 2371 int i; 2372 unsigned long long sum = 0; 2373 2374 for_each_possible_cpu(i) 2375 sum += cpu_rq(i)->nr_switches; 2376 2377 return sum; 2378} 2379 2380unsigned long nr_iowait(void) 2381{ 2382 unsigned long i, sum = 0; 2383 2384 for_each_possible_cpu(i) 2385 sum += atomic_read(&cpu_rq(i)->nr_iowait); 2386 2387 return sum; 2388} 2389 2390unsigned long nr_iowait_cpu(int cpu) 2391{ 2392 struct rq *this = cpu_rq(cpu); 2393 return atomic_read(&this->nr_iowait); 2394} 2395 2396#ifdef CONFIG_SMP 2397 2398/* 2399 * sched_exec - execve() is a valuable balancing opportunity, because at 2400 * this point the task has the smallest effective memory and cache footprint. 2401 */ 2402void sched_exec(void) 2403{ 2404 struct task_struct *p = current; 2405 unsigned long flags; 2406 int dest_cpu; 2407 2408 raw_spin_lock_irqsave(&p->pi_lock, flags); 2409 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); 2410 if (dest_cpu == smp_processor_id()) 2411 goto unlock; 2412 2413 if (likely(cpu_active(dest_cpu))) { 2414 struct migration_arg arg = { p, dest_cpu }; 2415 2416 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2417 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 2418 return; 2419 } 2420unlock: 2421 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2422} 2423 2424#endif 2425 2426DEFINE_PER_CPU(struct kernel_stat, kstat); 2427DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 2428 2429EXPORT_PER_CPU_SYMBOL(kstat); 2430EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 2431 2432/* 2433 * Return any ns on the sched_clock that have not yet been accounted in 2434 * @p in case that task is currently running. 2435 * 2436 * Called with task_rq_lock() held on @rq. 2437 */ 2438static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) 2439{ 2440 u64 ns = 0; 2441 2442 /* 2443 * Must be ->curr _and_ ->on_rq. If dequeued, we would 2444 * project cycles that may never be accounted to this 2445 * thread, breaking clock_gettime(). 2446 */ 2447 if (task_current(rq, p) && p->on_rq) { 2448 update_rq_clock(rq); 2449 ns = rq_clock_task(rq) - p->se.exec_start; 2450 if ((s64)ns < 0) 2451 ns = 0; 2452 } 2453 2454 return ns; 2455} 2456 2457unsigned long long task_delta_exec(struct task_struct *p) 2458{ 2459 unsigned long flags; 2460 struct rq *rq; 2461 u64 ns = 0; 2462 2463 rq = task_rq_lock(p, &flags); 2464 ns = do_task_delta_exec(p, rq); 2465 task_rq_unlock(rq, p, &flags); 2466 2467 return ns; 2468} 2469 2470/* 2471 * Return accounted runtime for the task. 2472 * In case the task is currently running, return the runtime plus current's 2473 * pending runtime that have not been accounted yet. 2474 */ 2475unsigned long long task_sched_runtime(struct task_struct *p) 2476{ 2477 unsigned long flags; 2478 struct rq *rq; 2479 u64 ns = 0; 2480 2481#if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 2482 /* 2483 * 64-bit doesn't need locks to atomically read a 64bit value. 2484 * So we have a optimization chance when the task's delta_exec is 0. 2485 * Reading ->on_cpu is racy, but this is ok. 2486 * 2487 * If we race with it leaving cpu, we'll take a lock. So we're correct. 2488 * If we race with it entering cpu, unaccounted time is 0. This is 2489 * indistinguishable from the read occurring a few cycles earlier. 2490 * If we see ->on_cpu without ->on_rq, the task is leaving, and has 2491 * been accounted, so we're correct here as well. 2492 */ 2493 if (!p->on_cpu || !p->on_rq) 2494 return p->se.sum_exec_runtime; 2495#endif 2496 2497 rq = task_rq_lock(p, &flags); 2498 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); 2499 task_rq_unlock(rq, p, &flags); 2500 2501 return ns; 2502} 2503 2504/* 2505 * This function gets called by the timer code, with HZ frequency. 2506 * We call it with interrupts disabled. 2507 */ 2508void scheduler_tick(void) 2509{ 2510 int cpu = smp_processor_id(); 2511 struct rq *rq = cpu_rq(cpu); 2512 struct task_struct *curr = rq->curr; 2513 2514 sched_clock_tick(); 2515 2516 raw_spin_lock(&rq->lock); 2517 update_rq_clock(rq); 2518 curr->sched_class->task_tick(rq, curr, 0); 2519 update_cpu_load_active(rq); 2520 raw_spin_unlock(&rq->lock); 2521 2522 perf_event_task_tick(); 2523 2524#ifdef CONFIG_SMP 2525 rq->idle_balance = idle_cpu(cpu); 2526 trigger_load_balance(rq); 2527#endif 2528 rq_last_tick_reset(rq); 2529} 2530 2531#ifdef CONFIG_NO_HZ_FULL 2532/** 2533 * scheduler_tick_max_deferment 2534 * 2535 * Keep at least one tick per second when a single 2536 * active task is running because the scheduler doesn't 2537 * yet completely support full dynticks environment. 2538 * 2539 * This makes sure that uptime, CFS vruntime, load 2540 * balancing, etc... continue to move forward, even 2541 * with a very low granularity. 2542 * 2543 * Return: Maximum deferment in nanoseconds. 2544 */ 2545u64 scheduler_tick_max_deferment(void) 2546{ 2547 struct rq *rq = this_rq(); 2548 unsigned long next, now = ACCESS_ONCE(jiffies); 2549 2550 next = rq->last_sched_tick + HZ; 2551 2552 if (time_before_eq(next, now)) 2553 return 0; 2554 2555 return jiffies_to_nsecs(next - now); 2556} 2557#endif 2558 2559notrace unsigned long get_parent_ip(unsigned long addr) 2560{ 2561 if (in_lock_functions(addr)) { 2562 addr = CALLER_ADDR2; 2563 if (in_lock_functions(addr)) 2564 addr = CALLER_ADDR3; 2565 } 2566 return addr; 2567} 2568 2569#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ 2570 defined(CONFIG_PREEMPT_TRACER)) 2571 2572void preempt_count_add(int val) 2573{ 2574#ifdef CONFIG_DEBUG_PREEMPT 2575 /* 2576 * Underflow? 2577 */ 2578 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 2579 return; 2580#endif 2581 __preempt_count_add(val); 2582#ifdef CONFIG_DEBUG_PREEMPT 2583 /* 2584 * Spinlock count overflowing soon? 2585 */ 2586 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 2587 PREEMPT_MASK - 10); 2588#endif 2589 if (preempt_count() == val) { 2590 unsigned long ip = get_parent_ip(CALLER_ADDR1); 2591#ifdef CONFIG_DEBUG_PREEMPT 2592 current->preempt_disable_ip = ip; 2593#endif 2594 trace_preempt_off(CALLER_ADDR0, ip); 2595 } 2596} 2597EXPORT_SYMBOL(preempt_count_add); 2598NOKPROBE_SYMBOL(preempt_count_add); 2599 2600void preempt_count_sub(int val) 2601{ 2602#ifdef CONFIG_DEBUG_PREEMPT 2603 /* 2604 * Underflow? 2605 */ 2606 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 2607 return; 2608 /* 2609 * Is the spinlock portion underflowing? 2610 */ 2611 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 2612 !(preempt_count() & PREEMPT_MASK))) 2613 return; 2614#endif 2615 2616 if (preempt_count() == val) 2617 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); 2618 __preempt_count_sub(val); 2619} 2620EXPORT_SYMBOL(preempt_count_sub); 2621NOKPROBE_SYMBOL(preempt_count_sub); 2622 2623#endif 2624 2625/* 2626 * Print scheduling while atomic bug: 2627 */ 2628static noinline void __schedule_bug(struct task_struct *prev) 2629{ 2630 if (oops_in_progress) 2631 return; 2632 2633 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 2634 prev->comm, prev->pid, preempt_count()); 2635 2636 debug_show_held_locks(prev); 2637 print_modules(); 2638 if (irqs_disabled()) 2639 print_irqtrace_events(prev); 2640#ifdef CONFIG_DEBUG_PREEMPT 2641 if (in_atomic_preempt_off()) { 2642 pr_err("Preemption disabled at:"); 2643 print_ip_sym(current->preempt_disable_ip); 2644 pr_cont("\n"); 2645 } 2646#endif 2647 dump_stack(); 2648 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 2649} 2650 2651/* 2652 * Various schedule()-time debugging checks and statistics: 2653 */ 2654static inline void schedule_debug(struct task_struct *prev) 2655{ 2656 /* 2657 * Test if we are atomic. Since do_exit() needs to call into 2658 * schedule() atomically, we ignore that path. Otherwise whine 2659 * if we are scheduling when we should not. 2660 */ 2661 if (unlikely(in_atomic_preempt_off() && prev->state != TASK_DEAD)) 2662 __schedule_bug(prev); 2663 rcu_sleep_check(); 2664 2665 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 2666 2667 schedstat_inc(this_rq(), sched_count); 2668} 2669 2670/* 2671 * Pick up the highest-prio task: 2672 */ 2673static inline struct task_struct * 2674pick_next_task(struct rq *rq, struct task_struct *prev) 2675{ 2676 const struct sched_class *class = &fair_sched_class; 2677 struct task_struct *p; 2678 2679 /* 2680 * Optimization: we know that if all tasks are in 2681 * the fair class we can call that function directly: 2682 */ 2683 if (likely(prev->sched_class == class && 2684 rq->nr_running == rq->cfs.h_nr_running)) { 2685 p = fair_sched_class.pick_next_task(rq, prev); 2686 if (unlikely(p == RETRY_TASK)) 2687 goto again; 2688 2689 /* assumes fair_sched_class->next == idle_sched_class */ 2690 if (unlikely(!p)) 2691 p = idle_sched_class.pick_next_task(rq, prev); 2692 2693 return p; 2694 } 2695 2696again: 2697 for_each_class(class) { 2698 p = class->pick_next_task(rq, prev); 2699 if (p) { 2700 if (unlikely(p == RETRY_TASK)) 2701 goto again; 2702 return p; 2703 } 2704 } 2705 2706 BUG(); /* the idle class will always have a runnable task */ 2707} 2708 2709/* 2710 * __schedule() is the main scheduler function. 2711 * 2712 * The main means of driving the scheduler and thus entering this function are: 2713 * 2714 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. 2715 * 2716 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return 2717 * paths. For example, see arch/x86/entry_64.S. 2718 * 2719 * To drive preemption between tasks, the scheduler sets the flag in timer 2720 * interrupt handler scheduler_tick(). 2721 * 2722 * 3. Wakeups don't really cause entry into schedule(). They add a 2723 * task to the run-queue and that's it. 2724 * 2725 * Now, if the new task added to the run-queue preempts the current 2726 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets 2727 * called on the nearest possible occasion: 2728 * 2729 * - If the kernel is preemptible (CONFIG_PREEMPT=y): 2730 * 2731 * - in syscall or exception context, at the next outmost 2732 * preempt_enable(). (this might be as soon as the wake_up()'s 2733 * spin_unlock()!) 2734 * 2735 * - in IRQ context, return from interrupt-handler to 2736 * preemptible context 2737 * 2738 * - If the kernel is not preemptible (CONFIG_PREEMPT is not set) 2739 * then at the next: 2740 * 2741 * - cond_resched() call 2742 * - explicit schedule() call 2743 * - return from syscall or exception to user-space 2744 * - return from interrupt-handler to user-space 2745 */ 2746static void __sched __schedule(void) 2747{ 2748 struct task_struct *prev, *next; 2749 unsigned long *switch_count; 2750 struct rq *rq; 2751 int cpu; 2752 2753need_resched: 2754 preempt_disable(); 2755 cpu = smp_processor_id(); 2756 rq = cpu_rq(cpu); 2757 rcu_note_context_switch(cpu); 2758 prev = rq->curr; 2759 2760 schedule_debug(prev); 2761 2762 if (sched_feat(HRTICK)) 2763 hrtick_clear(rq); 2764 2765 /* 2766 * Make sure that signal_pending_state()->signal_pending() below 2767 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 2768 * done by the caller to avoid the race with signal_wake_up(). 2769 */ 2770 smp_mb__before_spinlock(); 2771 raw_spin_lock_irq(&rq->lock); 2772 2773 switch_count = &prev->nivcsw; 2774 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { 2775 if (unlikely(signal_pending_state(prev->state, prev))) { 2776 prev->state = TASK_RUNNING; 2777 } else { 2778 deactivate_task(rq, prev, DEQUEUE_SLEEP); 2779 prev->on_rq = 0; 2780 2781 /* 2782 * If a worker went to sleep, notify and ask workqueue 2783 * whether it wants to wake up a task to maintain 2784 * concurrency. 2785 */ 2786 if (prev->flags & PF_WQ_WORKER) { 2787 struct task_struct *to_wakeup; 2788 2789 to_wakeup = wq_worker_sleeping(prev, cpu); 2790 if (to_wakeup) 2791 try_to_wake_up_local(to_wakeup); 2792 } 2793 } 2794 switch_count = &prev->nvcsw; 2795 } 2796 2797 if (prev->on_rq || rq->skip_clock_update < 0) 2798 update_rq_clock(rq); 2799 2800 next = pick_next_task(rq, prev); 2801 clear_tsk_need_resched(prev); 2802 clear_preempt_need_resched(); 2803 rq->skip_clock_update = 0; 2804 2805 if (likely(prev != next)) { 2806 rq->nr_switches++; 2807 rq->curr = next; 2808 ++*switch_count; 2809 2810 context_switch(rq, prev, next); /* unlocks the rq */ 2811 /* 2812 * The context switch have flipped the stack from under us 2813 * and restored the local variables which were saved when 2814 * this task called schedule() in the past. prev == current 2815 * is still correct, but it can be moved to another cpu/rq. 2816 */ 2817 cpu = smp_processor_id(); 2818 rq = cpu_rq(cpu); 2819 } else 2820 raw_spin_unlock_irq(&rq->lock); 2821 2822 post_schedule(rq); 2823 2824 sched_preempt_enable_no_resched(); 2825 if (need_resched()) 2826 goto need_resched; 2827} 2828 2829static inline void sched_submit_work(struct task_struct *tsk) 2830{ 2831 if (!tsk->state || tsk_is_pi_blocked(tsk)) 2832 return; 2833 /* 2834 * If we are going to sleep and we have plugged IO queued, 2835 * make sure to submit it to avoid deadlocks. 2836 */ 2837 if (blk_needs_flush_plug(tsk)) 2838 blk_schedule_flush_plug(tsk); 2839} 2840 2841asmlinkage __visible void __sched schedule(void) 2842{ 2843 struct task_struct *tsk = current; 2844 2845 sched_submit_work(tsk); 2846 __schedule(); 2847} 2848EXPORT_SYMBOL(schedule); 2849 2850#ifdef CONFIG_CONTEXT_TRACKING 2851asmlinkage __visible void __sched schedule_user(void) 2852{ 2853 /* 2854 * If we come here after a random call to set_need_resched(), 2855 * or we have been woken up remotely but the IPI has not yet arrived, 2856 * we haven't yet exited the RCU idle mode. Do it here manually until 2857 * we find a better solution. 2858 */ 2859 user_exit(); 2860 schedule(); 2861 user_enter(); 2862} 2863#endif 2864 2865/** 2866 * schedule_preempt_disabled - called with preemption disabled 2867 * 2868 * Returns with preemption disabled. Note: preempt_count must be 1 2869 */ 2870void __sched schedule_preempt_disabled(void) 2871{ 2872 sched_preempt_enable_no_resched(); 2873 schedule(); 2874 preempt_disable(); 2875} 2876 2877#ifdef CONFIG_PREEMPT 2878/* 2879 * this is the entry point to schedule() from in-kernel preemption 2880 * off of preempt_enable. Kernel preemptions off return from interrupt 2881 * occur there and call schedule directly. 2882 */ 2883asmlinkage __visible void __sched notrace preempt_schedule(void) 2884{ 2885 /* 2886 * If there is a non-zero preempt_count or interrupts are disabled, 2887 * we do not want to preempt the current task. Just return.. 2888 */ 2889 if (likely(!preemptible())) 2890 return; 2891 2892 do { 2893 __preempt_count_add(PREEMPT_ACTIVE); 2894 __schedule(); 2895 __preempt_count_sub(PREEMPT_ACTIVE); 2896 2897 /* 2898 * Check again in case we missed a preemption opportunity 2899 * between schedule and now. 2900 */ 2901 barrier(); 2902 } while (need_resched()); 2903} 2904NOKPROBE_SYMBOL(preempt_schedule); 2905EXPORT_SYMBOL(preempt_schedule); 2906#endif /* CONFIG_PREEMPT */ 2907 2908/* 2909 * this is the entry point to schedule() from kernel preemption 2910 * off of irq context. 2911 * Note, that this is called and return with irqs disabled. This will 2912 * protect us against recursive calling from irq. 2913 */ 2914asmlinkage __visible void __sched preempt_schedule_irq(void) 2915{ 2916 enum ctx_state prev_state; 2917 2918 /* Catch callers which need to be fixed */ 2919 BUG_ON(preempt_count() || !irqs_disabled()); 2920 2921 prev_state = exception_enter(); 2922 2923 do { 2924 __preempt_count_add(PREEMPT_ACTIVE); 2925 local_irq_enable(); 2926 __schedule(); 2927 local_irq_disable(); 2928 __preempt_count_sub(PREEMPT_ACTIVE); 2929 2930 /* 2931 * Check again in case we missed a preemption opportunity 2932 * between schedule and now. 2933 */ 2934 barrier(); 2935 } while (need_resched()); 2936 2937 exception_exit(prev_state); 2938} 2939 2940int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, 2941 void *key) 2942{ 2943 return try_to_wake_up(curr->private, mode, wake_flags); 2944} 2945EXPORT_SYMBOL(default_wake_function); 2946 2947#ifdef CONFIG_RT_MUTEXES 2948 2949/* 2950 * rt_mutex_setprio - set the current priority of a task 2951 * @p: task 2952 * @prio: prio value (kernel-internal form) 2953 * 2954 * This function changes the 'effective' priority of a task. It does 2955 * not touch ->normal_prio like __setscheduler(). 2956 * 2957 * Used by the rt_mutex code to implement priority inheritance 2958 * logic. Call site only calls if the priority of the task changed. 2959 */ 2960void rt_mutex_setprio(struct task_struct *p, int prio) 2961{ 2962 int oldprio, on_rq, running, enqueue_flag = 0; 2963 struct rq *rq; 2964 const struct sched_class *prev_class; 2965 2966 BUG_ON(prio > MAX_PRIO); 2967 2968 rq = __task_rq_lock(p); 2969 2970 /* 2971 * Idle task boosting is a nono in general. There is one 2972 * exception, when PREEMPT_RT and NOHZ is active: 2973 * 2974 * The idle task calls get_next_timer_interrupt() and holds 2975 * the timer wheel base->lock on the CPU and another CPU wants 2976 * to access the timer (probably to cancel it). We can safely 2977 * ignore the boosting request, as the idle CPU runs this code 2978 * with interrupts disabled and will complete the lock 2979 * protected section without being interrupted. So there is no 2980 * real need to boost. 2981 */ 2982 if (unlikely(p == rq->idle)) { 2983 WARN_ON(p != rq->curr); 2984 WARN_ON(p->pi_blocked_on); 2985 goto out_unlock; 2986 } 2987 2988 trace_sched_pi_setprio(p, prio); 2989 oldprio = p->prio; 2990 prev_class = p->sched_class; 2991 on_rq = p->on_rq; 2992 running = task_current(rq, p); 2993 if (on_rq) 2994 dequeue_task(rq, p, 0); 2995 if (running) 2996 p->sched_class->put_prev_task(rq, p); 2997 2998 /* 2999 * Boosting condition are: 3000 * 1. -rt task is running and holds mutex A 3001 * --> -dl task blocks on mutex A 3002 * 3003 * 2. -dl task is running and holds mutex A 3004 * --> -dl task blocks on mutex A and could preempt the 3005 * running task 3006 */ 3007 if (dl_prio(prio)) { 3008 struct task_struct *pi_task = rt_mutex_get_top_task(p); 3009 if (!dl_prio(p->normal_prio) || 3010 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { 3011 p->dl.dl_boosted = 1; 3012 p->dl.dl_throttled = 0; 3013 enqueue_flag = ENQUEUE_REPLENISH; 3014 } else 3015 p->dl.dl_boosted = 0; 3016 p->sched_class = &dl_sched_class; 3017 } else if (rt_prio(prio)) { 3018 if (dl_prio(oldprio)) 3019 p->dl.dl_boosted = 0; 3020 if (oldprio < prio) 3021 enqueue_flag = ENQUEUE_HEAD; 3022 p->sched_class = &rt_sched_class; 3023 } else { 3024 if (dl_prio(oldprio)) 3025 p->dl.dl_boosted = 0; 3026 p->sched_class = &fair_sched_class; 3027 } 3028 3029 p->prio = prio; 3030 3031 if (running) 3032 p->sched_class->set_curr_task(rq); 3033 if (on_rq) 3034 enqueue_task(rq, p, enqueue_flag); 3035 3036 check_class_changed(rq, p, prev_class, oldprio); 3037out_unlock: 3038 __task_rq_unlock(rq); 3039} 3040#endif 3041 3042void set_user_nice(struct task_struct *p, long nice) 3043{ 3044 int old_prio, delta, on_rq; 3045 unsigned long flags; 3046 struct rq *rq; 3047 3048 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) 3049 return; 3050 /* 3051 * We have to be careful, if called from sys_setpriority(), 3052 * the task might be in the middle of scheduling on another CPU. 3053 */ 3054 rq = task_rq_lock(p, &flags); 3055 /* 3056 * The RT priorities are set via sched_setscheduler(), but we still 3057 * allow the 'normal' nice value to be set - but as expected 3058 * it wont have any effect on scheduling until the task is 3059 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: 3060 */ 3061 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 3062 p->static_prio = NICE_TO_PRIO(nice); 3063 goto out_unlock; 3064 } 3065 on_rq = p->on_rq; 3066 if (on_rq) 3067 dequeue_task(rq, p, 0); 3068 3069 p->static_prio = NICE_TO_PRIO(nice); 3070 set_load_weight(p); 3071 old_prio = p->prio; 3072 p->prio = effective_prio(p); 3073 delta = p->prio - old_prio; 3074 3075 if (on_rq) { 3076 enqueue_task(rq, p, 0); 3077 /* 3078 * If the task increased its priority or is running and 3079 * lowered its priority, then reschedule its CPU: 3080 */ 3081 if (delta < 0 || (delta > 0 && task_running(rq, p))) 3082 resched_curr(rq); 3083 } 3084out_unlock: 3085 task_rq_unlock(rq, p, &flags); 3086} 3087EXPORT_SYMBOL(set_user_nice); 3088 3089/* 3090 * can_nice - check if a task can reduce its nice value 3091 * @p: task 3092 * @nice: nice value 3093 */ 3094int can_nice(const struct task_struct *p, const int nice) 3095{ 3096 /* convert nice value [19,-20] to rlimit style value [1,40] */ 3097 int nice_rlim = nice_to_rlimit(nice); 3098 3099 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || 3100 capable(CAP_SYS_NICE)); 3101} 3102 3103#ifdef __ARCH_WANT_SYS_NICE 3104 3105/* 3106 * sys_nice - change the priority of the current process. 3107 * @increment: priority increment 3108 * 3109 * sys_setpriority is a more generic, but much slower function that 3110 * does similar things. 3111 */ 3112SYSCALL_DEFINE1(nice, int, increment) 3113{ 3114 long nice, retval; 3115 3116 /* 3117 * Setpriority might change our priority at the same moment. 3118 * We don't have to worry. Conceptually one call occurs first 3119 * and we have a single winner. 3120 */ 3121 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); 3122 nice = task_nice(current) + increment; 3123 3124 nice = clamp_val(nice, MIN_NICE, MAX_NICE); 3125 if (increment < 0 && !can_nice(current, nice)) 3126 return -EPERM; 3127 3128 retval = security_task_setnice(current, nice); 3129 if (retval) 3130 return retval; 3131 3132 set_user_nice(current, nice); 3133 return 0; 3134} 3135 3136#endif 3137 3138/** 3139 * task_prio - return the priority value of a given task. 3140 * @p: the task in question. 3141 * 3142 * Return: The priority value as seen by users in /proc. 3143 * RT tasks are offset by -200. Normal tasks are centered 3144 * around 0, value goes from -16 to +15. 3145 */ 3146int task_prio(const struct task_struct *p) 3147{ 3148 return p->prio - MAX_RT_PRIO; 3149} 3150 3151/** 3152 * idle_cpu - is a given cpu idle currently? 3153 * @cpu: the processor in question. 3154 * 3155 * Return: 1 if the CPU is currently idle. 0 otherwise. 3156 */ 3157int idle_cpu(int cpu) 3158{ 3159 struct rq *rq = cpu_rq(cpu); 3160 3161 if (rq->curr != rq->idle) 3162 return 0; 3163 3164 if (rq->nr_running) 3165 return 0; 3166 3167#ifdef CONFIG_SMP 3168 if (!llist_empty(&rq->wake_list)) 3169 return 0; 3170#endif 3171 3172 return 1; 3173} 3174 3175/** 3176 * idle_task - return the idle task for a given cpu. 3177 * @cpu: the processor in question. 3178 * 3179 * Return: The idle task for the cpu @cpu. 3180 */ 3181struct task_struct *idle_task(int cpu) 3182{ 3183 return cpu_rq(cpu)->idle; 3184} 3185 3186/** 3187 * find_process_by_pid - find a process with a matching PID value. 3188 * @pid: the pid in question. 3189 * 3190 * The task of @pid, if found. %NULL otherwise. 3191 */ 3192static struct task_struct *find_process_by_pid(pid_t pid) 3193{ 3194 return pid ? find_task_by_vpid(pid) : current; 3195} 3196 3197/* 3198 * This function initializes the sched_dl_entity of a newly becoming 3199 * SCHED_DEADLINE task. 3200 * 3201 * Only the static values are considered here, the actual runtime and the 3202 * absolute deadline will be properly calculated when the task is enqueued 3203 * for the first time with its new policy. 3204 */ 3205static void 3206__setparam_dl(struct task_struct *p, const struct sched_attr *attr) 3207{ 3208 struct sched_dl_entity *dl_se = &p->dl; 3209 3210 init_dl_task_timer(dl_se); 3211 dl_se->dl_runtime = attr->sched_runtime; 3212 dl_se->dl_deadline = attr->sched_deadline; 3213 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; 3214 dl_se->flags = attr->sched_flags; 3215 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); 3216 dl_se->dl_throttled = 0; 3217 dl_se->dl_new = 1; 3218 dl_se->dl_yielded = 0; 3219} 3220 3221static void __setscheduler_params(struct task_struct *p, 3222 const struct sched_attr *attr) 3223{ 3224 int policy = attr->sched_policy; 3225 3226 if (policy == -1) /* setparam */ 3227 policy = p->policy; 3228 3229 p->policy = policy; 3230 3231 if (dl_policy(policy)) 3232 __setparam_dl(p, attr); 3233 else if (fair_policy(policy)) 3234 p->static_prio = NICE_TO_PRIO(attr->sched_nice); 3235 3236 /* 3237 * __sched_setscheduler() ensures attr->sched_priority == 0 when 3238 * !rt_policy. Always setting this ensures that things like 3239 * getparam()/getattr() don't report silly values for !rt tasks. 3240 */ 3241 p->rt_priority = attr->sched_priority; 3242 p->normal_prio = normal_prio(p); 3243 set_load_weight(p); 3244} 3245 3246/* Actually do priority change: must hold pi & rq lock. */ 3247static void __setscheduler(struct rq *rq, struct task_struct *p, 3248 const struct sched_attr *attr) 3249{ 3250 __setscheduler_params(p, attr); 3251 3252 /* 3253 * If we get here, there was no pi waiters boosting the 3254 * task. It is safe to use the normal prio. 3255 */ 3256 p->prio = normal_prio(p); 3257 3258 if (dl_prio(p->prio)) 3259 p->sched_class = &dl_sched_class; 3260 else if (rt_prio(p->prio)) 3261 p->sched_class = &rt_sched_class; 3262 else 3263 p->sched_class = &fair_sched_class; 3264} 3265 3266static void 3267__getparam_dl(struct task_struct *p, struct sched_attr *attr) 3268{ 3269 struct sched_dl_entity *dl_se = &p->dl; 3270 3271 attr->sched_priority = p->rt_priority; 3272 attr->sched_runtime = dl_se->dl_runtime; 3273 attr->sched_deadline = dl_se->dl_deadline; 3274 attr->sched_period = dl_se->dl_period; 3275 attr->sched_flags = dl_se->flags; 3276} 3277 3278/* 3279 * This function validates the new parameters of a -deadline task. 3280 * We ask for the deadline not being zero, and greater or equal 3281 * than the runtime, as well as the period of being zero or 3282 * greater than deadline. Furthermore, we have to be sure that 3283 * user parameters are above the internal resolution of 1us (we 3284 * check sched_runtime only since it is always the smaller one) and 3285 * below 2^63 ns (we have to check both sched_deadline and 3286 * sched_period, as the latter can be zero). 3287 */ 3288static bool 3289__checkparam_dl(const struct sched_attr *attr) 3290{ 3291 /* deadline != 0 */ 3292 if (attr->sched_deadline == 0) 3293 return false; 3294 3295 /* 3296 * Since we truncate DL_SCALE bits, make sure we're at least 3297 * that big. 3298 */ 3299 if (attr->sched_runtime < (1ULL << DL_SCALE)) 3300 return false; 3301 3302 /* 3303 * Since we use the MSB for wrap-around and sign issues, make 3304 * sure it's not set (mind that period can be equal to zero). 3305 */ 3306 if (attr->sched_deadline & (1ULL << 63) || 3307 attr->sched_period & (1ULL << 63)) 3308 return false; 3309 3310 /* runtime <= deadline <= period (if period != 0) */ 3311 if ((attr->sched_period != 0 && 3312 attr->sched_period < attr->sched_deadline) || 3313 attr->sched_deadline < attr->sched_runtime) 3314 return false; 3315 3316 return true; 3317} 3318 3319/* 3320 * check the target process has a UID that matches the current process's 3321 */ 3322static bool check_same_owner(struct task_struct *p) 3323{ 3324 const struct cred *cred = current_cred(), *pcred; 3325 bool match; 3326 3327 rcu_read_lock(); 3328 pcred = __task_cred(p); 3329 match = (uid_eq(cred->euid, pcred->euid) || 3330 uid_eq(cred->euid, pcred->uid)); 3331 rcu_read_unlock(); 3332 return match; 3333} 3334 3335static int __sched_setscheduler(struct task_struct *p, 3336 const struct sched_attr *attr, 3337 bool user) 3338{ 3339 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 : 3340 MAX_RT_PRIO - 1 - attr->sched_priority; 3341 int retval, oldprio, oldpolicy = -1, on_rq, running; 3342 int policy = attr->sched_policy; 3343 unsigned long flags; 3344 const struct sched_class *prev_class; 3345 struct rq *rq; 3346 int reset_on_fork; 3347 3348 /* may grab non-irq protected spin_locks */ 3349 BUG_ON(in_interrupt()); 3350recheck: 3351 /* double check policy once rq lock held */ 3352 if (policy < 0) { 3353 reset_on_fork = p->sched_reset_on_fork; 3354 policy = oldpolicy = p->policy; 3355 } else { 3356 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 3357 3358 if (policy != SCHED_DEADLINE && 3359 policy != SCHED_FIFO && policy != SCHED_RR && 3360 policy != SCHED_NORMAL && policy != SCHED_BATCH && 3361 policy != SCHED_IDLE) 3362 return -EINVAL; 3363 } 3364 3365 if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK)) 3366 return -EINVAL; 3367 3368 /* 3369 * Valid priorities for SCHED_FIFO and SCHED_RR are 3370 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, 3371 * SCHED_BATCH and SCHED_IDLE is 0. 3372 */ 3373 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) || 3374 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1)) 3375 return -EINVAL; 3376 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 3377 (rt_policy(policy) != (attr->sched_priority != 0))) 3378 return -EINVAL; 3379 3380 /* 3381 * Allow unprivileged RT tasks to decrease priority: 3382 */ 3383 if (user && !capable(CAP_SYS_NICE)) { 3384 if (fair_policy(policy)) { 3385 if (attr->sched_nice < task_nice(p) && 3386 !can_nice(p, attr->sched_nice)) 3387 return -EPERM; 3388 } 3389 3390 if (rt_policy(policy)) { 3391 unsigned long rlim_rtprio = 3392 task_rlimit(p, RLIMIT_RTPRIO); 3393 3394 /* can't set/change the rt policy */ 3395 if (policy != p->policy && !rlim_rtprio) 3396 return -EPERM; 3397 3398 /* can't increase priority */ 3399 if (attr->sched_priority > p->rt_priority && 3400 attr->sched_priority > rlim_rtprio) 3401 return -EPERM; 3402 } 3403 3404 /* 3405 * Can't set/change SCHED_DEADLINE policy at all for now 3406 * (safest behavior); in the future we would like to allow 3407 * unprivileged DL tasks to increase their relative deadline 3408 * or reduce their runtime (both ways reducing utilization) 3409 */ 3410 if (dl_policy(policy)) 3411 return -EPERM; 3412 3413 /* 3414 * Treat SCHED_IDLE as nice 20. Only allow a switch to 3415 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 3416 */ 3417 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) { 3418 if (!can_nice(p, task_nice(p))) 3419 return -EPERM; 3420 } 3421 3422 /* can't change other user's priorities */ 3423 if (!check_same_owner(p)) 3424 return -EPERM; 3425 3426 /* Normal users shall not reset the sched_reset_on_fork flag */ 3427 if (p->sched_reset_on_fork && !reset_on_fork) 3428 return -EPERM; 3429 } 3430 3431 if (user) { 3432 retval = security_task_setscheduler(p); 3433 if (retval) 3434 return retval; 3435 } 3436 3437 /* 3438 * make sure no PI-waiters arrive (or leave) while we are 3439 * changing the priority of the task: 3440 * 3441 * To be able to change p->policy safely, the appropriate 3442 * runqueue lock must be held. 3443 */ 3444 rq = task_rq_lock(p, &flags); 3445 3446 /* 3447 * Changing the policy of the stop threads its a very bad idea 3448 */ 3449 if (p == rq->stop) { 3450 task_rq_unlock(rq, p, &flags); 3451 return -EINVAL; 3452 } 3453 3454 /* 3455 * If not changing anything there's no need to proceed further, 3456 * but store a possible modification of reset_on_fork. 3457 */ 3458 if (unlikely(policy == p->policy)) { 3459 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) 3460 goto change; 3461 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) 3462 goto change; 3463 if (dl_policy(policy)) 3464 goto change; 3465 3466 p->sched_reset_on_fork = reset_on_fork; 3467 task_rq_unlock(rq, p, &flags); 3468 return 0; 3469 } 3470change: 3471 3472 if (user) { 3473#ifdef CONFIG_RT_GROUP_SCHED 3474 /* 3475 * Do not allow realtime tasks into groups that have no runtime 3476 * assigned. 3477 */ 3478 if (rt_bandwidth_enabled() && rt_policy(policy) && 3479 task_group(p)->rt_bandwidth.rt_runtime == 0 && 3480 !task_group_is_autogroup(task_group(p))) { 3481 task_rq_unlock(rq, p, &flags); 3482 return -EPERM; 3483 } 3484#endif 3485#ifdef CONFIG_SMP 3486 if (dl_bandwidth_enabled() && dl_policy(policy)) { 3487 cpumask_t *span = rq->rd->span; 3488 3489 /* 3490 * Don't allow tasks with an affinity mask smaller than 3491 * the entire root_domain to become SCHED_DEADLINE. We 3492 * will also fail if there's no bandwidth available. 3493 */ 3494 if (!cpumask_subset(span, &p->cpus_allowed) || 3495 rq->rd->dl_bw.bw == 0) { 3496 task_rq_unlock(rq, p, &flags); 3497 return -EPERM; 3498 } 3499 } 3500#endif 3501 } 3502 3503 /* recheck policy now with rq lock held */ 3504 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 3505 policy = oldpolicy = -1; 3506 task_rq_unlock(rq, p, &flags); 3507 goto recheck; 3508 } 3509 3510 /* 3511 * If setscheduling to SCHED_DEADLINE (or changing the parameters 3512 * of a SCHED_DEADLINE task) we need to check if enough bandwidth 3513 * is available. 3514 */ 3515 if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) { 3516 task_rq_unlock(rq, p, &flags); 3517 return -EBUSY; 3518 } 3519 3520 p->sched_reset_on_fork = reset_on_fork; 3521 oldprio = p->prio; 3522 3523 /* 3524 * Special case for priority boosted tasks. 3525 * 3526 * If the new priority is lower or equal (user space view) 3527 * than the current (boosted) priority, we just store the new 3528 * normal parameters and do not touch the scheduler class and 3529 * the runqueue. This will be done when the task deboost 3530 * itself. 3531 */ 3532 if (rt_mutex_check_prio(p, newprio)) { 3533 __setscheduler_params(p, attr); 3534 task_rq_unlock(rq, p, &flags); 3535 return 0; 3536 } 3537 3538 on_rq = p->on_rq; 3539 running = task_current(rq, p); 3540 if (on_rq) 3541 dequeue_task(rq, p, 0); 3542 if (running) 3543 p->sched_class->put_prev_task(rq, p); 3544 3545 prev_class = p->sched_class; 3546 __setscheduler(rq, p, attr); 3547 3548 if (running) 3549 p->sched_class->set_curr_task(rq); 3550 if (on_rq) { 3551 /* 3552 * We enqueue to tail when the priority of a task is 3553 * increased (user space view). 3554 */ 3555 enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0); 3556 } 3557 3558 check_class_changed(rq, p, prev_class, oldprio); 3559 task_rq_unlock(rq, p, &flags); 3560 3561 rt_mutex_adjust_pi(p); 3562 3563 return 0; 3564} 3565 3566static int _sched_setscheduler(struct task_struct *p, int policy, 3567 const struct sched_param *param, bool check) 3568{ 3569 struct sched_attr attr = { 3570 .sched_policy = policy, 3571 .sched_priority = param->sched_priority, 3572 .sched_nice = PRIO_TO_NICE(p->static_prio), 3573 }; 3574 3575 /* 3576 * Fixup the legacy SCHED_RESET_ON_FORK hack 3577 */ 3578 if (policy & SCHED_RESET_ON_FORK) { 3579 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 3580 policy &= ~SCHED_RESET_ON_FORK; 3581 attr.sched_policy = policy; 3582 } 3583 3584 return __sched_setscheduler(p, &attr, check); 3585} 3586/** 3587 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 3588 * @p: the task in question. 3589 * @policy: new policy. 3590 * @param: structure containing the new RT priority. 3591 * 3592 * Return: 0 on success. An error code otherwise. 3593 * 3594 * NOTE that the task may be already dead. 3595 */ 3596int sched_setscheduler(struct task_struct *p, int policy, 3597 const struct sched_param *param) 3598{ 3599 return _sched_setscheduler(p, policy, param, true); 3600} 3601EXPORT_SYMBOL_GPL(sched_setscheduler); 3602 3603int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 3604{ 3605 return __sched_setscheduler(p, attr, true); 3606} 3607EXPORT_SYMBOL_GPL(sched_setattr); 3608 3609/** 3610 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 3611 * @p: the task in question. 3612 * @policy: new policy. 3613 * @param: structure containing the new RT priority. 3614 * 3615 * Just like sched_setscheduler, only don't bother checking if the 3616 * current context has permission. For example, this is needed in 3617 * stop_machine(): we create temporary high priority worker threads, 3618 * but our caller might not have that capability. 3619 * 3620 * Return: 0 on success. An error code otherwise. 3621 */ 3622int sched_setscheduler_nocheck(struct task_struct *p, int policy, 3623 const struct sched_param *param) 3624{ 3625 return _sched_setscheduler(p, policy, param, false); 3626} 3627 3628static int 3629do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 3630{ 3631 struct sched_param lparam; 3632 struct task_struct *p; 3633 int retval; 3634 3635 if (!param || pid < 0) 3636 return -EINVAL; 3637 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 3638 return -EFAULT; 3639 3640 rcu_read_lock(); 3641 retval = -ESRCH; 3642 p = find_process_by_pid(pid); 3643 if (p != NULL) 3644 retval = sched_setscheduler(p, policy, &lparam); 3645 rcu_read_unlock(); 3646 3647 return retval; 3648} 3649 3650/* 3651 * Mimics kernel/events/core.c perf_copy_attr(). 3652 */ 3653static int sched_copy_attr(struct sched_attr __user *uattr, 3654 struct sched_attr *attr) 3655{ 3656 u32 size; 3657 int ret; 3658 3659 if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0)) 3660 return -EFAULT; 3661 3662 /* 3663 * zero the full structure, so that a short copy will be nice. 3664 */ 3665 memset(attr, 0, sizeof(*attr)); 3666 3667 ret = get_user(size, &uattr->size); 3668 if (ret) 3669 return ret; 3670 3671 if (size > PAGE_SIZE) /* silly large */ 3672 goto err_size; 3673 3674 if (!size) /* abi compat */ 3675 size = SCHED_ATTR_SIZE_VER0; 3676 3677 if (size < SCHED_ATTR_SIZE_VER0) 3678 goto err_size; 3679 3680 /* 3681 * If we're handed a bigger struct than we know of, 3682 * ensure all the unknown bits are 0 - i.e. new 3683 * user-space does not rely on any kernel feature 3684 * extensions we dont know about yet. 3685 */ 3686 if (size > sizeof(*attr)) { 3687 unsigned char __user *addr; 3688 unsigned char __user *end; 3689 unsigned char val; 3690 3691 addr = (void __user *)uattr + sizeof(*attr); 3692 end = (void __user *)uattr + size; 3693 3694 for (; addr < end; addr++) { 3695 ret = get_user(val, addr); 3696 if (ret) 3697 return ret; 3698 if (val) 3699 goto err_size; 3700 } 3701 size = sizeof(*attr); 3702 } 3703 3704 ret = copy_from_user(attr, uattr, size); 3705 if (ret) 3706 return -EFAULT; 3707 3708 /* 3709 * XXX: do we want to be lenient like existing syscalls; or do we want 3710 * to be strict and return an error on out-of-bounds values? 3711 */ 3712 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 3713 3714 return 0; 3715 3716err_size: 3717 put_user(sizeof(*attr), &uattr->size); 3718 return -E2BIG; 3719} 3720 3721/** 3722 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 3723 * @pid: the pid in question. 3724 * @policy: new policy. 3725 * @param: structure containing the new RT priority. 3726 * 3727 * Return: 0 on success. An error code otherwise. 3728 */ 3729SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, 3730 struct sched_param __user *, param) 3731{ 3732 /* negative values for policy are not valid */ 3733 if (policy < 0) 3734 return -EINVAL; 3735 3736 return do_sched_setscheduler(pid, policy, param); 3737} 3738 3739/** 3740 * sys_sched_setparam - set/change the RT priority of a thread 3741 * @pid: the pid in question. 3742 * @param: structure containing the new RT priority. 3743 * 3744 * Return: 0 on success. An error code otherwise. 3745 */ 3746SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 3747{ 3748 return do_sched_setscheduler(pid, -1, param); 3749} 3750 3751/** 3752 * sys_sched_setattr - same as above, but with extended sched_attr 3753 * @pid: the pid in question. 3754 * @uattr: structure containing the extended parameters. 3755 * @flags: for future extension. 3756 */ 3757SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 3758 unsigned int, flags) 3759{ 3760 struct sched_attr attr; 3761 struct task_struct *p; 3762 int retval; 3763 3764 if (!uattr || pid < 0 || flags) 3765 return -EINVAL; 3766 3767 retval = sched_copy_attr(uattr, &attr); 3768 if (retval) 3769 return retval; 3770 3771 if ((int)attr.sched_policy < 0) 3772 return -EINVAL; 3773 3774 rcu_read_lock(); 3775 retval = -ESRCH; 3776 p = find_process_by_pid(pid); 3777 if (p != NULL) 3778 retval = sched_setattr(p, &attr); 3779 rcu_read_unlock(); 3780 3781 return retval; 3782} 3783 3784/** 3785 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 3786 * @pid: the pid in question. 3787 * 3788 * Return: On success, the policy of the thread. Otherwise, a negative error 3789 * code. 3790 */ 3791SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 3792{ 3793 struct task_struct *p; 3794 int retval; 3795 3796 if (pid < 0) 3797 return -EINVAL; 3798 3799 retval = -ESRCH; 3800 rcu_read_lock(); 3801 p = find_process_by_pid(pid); 3802 if (p) { 3803 retval = security_task_getscheduler(p); 3804 if (!retval) 3805 retval = p->policy 3806 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 3807 } 3808 rcu_read_unlock(); 3809 return retval; 3810} 3811 3812/** 3813 * sys_sched_getparam - get the RT priority of a thread 3814 * @pid: the pid in question. 3815 * @param: structure containing the RT priority. 3816 * 3817 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 3818 * code. 3819 */ 3820SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 3821{ 3822 struct sched_param lp = { .sched_priority = 0 }; 3823 struct task_struct *p; 3824 int retval; 3825 3826 if (!param || pid < 0) 3827 return -EINVAL; 3828 3829 rcu_read_lock(); 3830 p = find_process_by_pid(pid); 3831 retval = -ESRCH; 3832 if (!p) 3833 goto out_unlock; 3834 3835 retval = security_task_getscheduler(p); 3836 if (retval) 3837 goto out_unlock; 3838 3839 if (task_has_rt_policy(p)) 3840 lp.sched_priority = p->rt_priority; 3841 rcu_read_unlock(); 3842 3843 /* 3844 * This one might sleep, we cannot do it with a spinlock held ... 3845 */ 3846 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; 3847 3848 return retval; 3849 3850out_unlock: 3851 rcu_read_unlock(); 3852 return retval; 3853} 3854 3855static int sched_read_attr(struct sched_attr __user *uattr, 3856 struct sched_attr *attr, 3857 unsigned int usize) 3858{ 3859 int ret; 3860 3861 if (!access_ok(VERIFY_WRITE, uattr, usize)) 3862 return -EFAULT; 3863 3864 /* 3865 * If we're handed a smaller struct than we know of, 3866 * ensure all the unknown bits are 0 - i.e. old 3867 * user-space does not get uncomplete information. 3868 */ 3869 if (usize < sizeof(*attr)) { 3870 unsigned char *addr; 3871 unsigned char *end; 3872 3873 addr = (void *)attr + usize; 3874 end = (void *)attr + sizeof(*attr); 3875 3876 for (; addr < end; addr++) { 3877 if (*addr) 3878 return -EFBIG; 3879 } 3880 3881 attr->size = usize; 3882 } 3883 3884 ret = copy_to_user(uattr, attr, attr->size); 3885 if (ret) 3886 return -EFAULT; 3887 3888 return 0; 3889} 3890 3891/** 3892 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 3893 * @pid: the pid in question. 3894 * @uattr: structure containing the extended parameters. 3895 * @size: sizeof(attr) for fwd/bwd comp. 3896 * @flags: for future extension. 3897 */ 3898SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 3899 unsigned int, size, unsigned int, flags) 3900{ 3901 struct sched_attr attr = { 3902 .size = sizeof(struct sched_attr), 3903 }; 3904 struct task_struct *p; 3905 int retval; 3906 3907 if (!uattr || pid < 0 || size > PAGE_SIZE || 3908 size < SCHED_ATTR_SIZE_VER0 || flags) 3909 return -EINVAL; 3910 3911 rcu_read_lock(); 3912 p = find_process_by_pid(pid); 3913 retval = -ESRCH; 3914 if (!p) 3915 goto out_unlock; 3916 3917 retval = security_task_getscheduler(p); 3918 if (retval) 3919 goto out_unlock; 3920 3921 attr.sched_policy = p->policy; 3922 if (p->sched_reset_on_fork) 3923 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 3924 if (task_has_dl_policy(p)) 3925 __getparam_dl(p, &attr); 3926 else if (task_has_rt_policy(p)) 3927 attr.sched_priority = p->rt_priority; 3928 else 3929 attr.sched_nice = task_nice(p); 3930 3931 rcu_read_unlock(); 3932 3933 retval = sched_read_attr(uattr, &attr, size); 3934 return retval; 3935 3936out_unlock: 3937 rcu_read_unlock(); 3938 return retval; 3939} 3940 3941long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 3942{ 3943 cpumask_var_t cpus_allowed, new_mask; 3944 struct task_struct *p; 3945 int retval; 3946 3947 rcu_read_lock(); 3948 3949 p = find_process_by_pid(pid); 3950 if (!p) { 3951 rcu_read_unlock(); 3952 return -ESRCH; 3953 } 3954 3955 /* Prevent p going away */ 3956 get_task_struct(p); 3957 rcu_read_unlock(); 3958 3959 if (p->flags & PF_NO_SETAFFINITY) { 3960 retval = -EINVAL; 3961 goto out_put_task; 3962 } 3963 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 3964 retval = -ENOMEM; 3965 goto out_put_task; 3966 } 3967 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 3968 retval = -ENOMEM; 3969 goto out_free_cpus_allowed; 3970 } 3971 retval = -EPERM; 3972 if (!check_same_owner(p)) { 3973 rcu_read_lock(); 3974 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { 3975 rcu_read_unlock(); 3976 goto out_unlock; 3977 } 3978 rcu_read_unlock(); 3979 } 3980 3981 retval = security_task_setscheduler(p); 3982 if (retval) 3983 goto out_unlock; 3984 3985 3986 cpuset_cpus_allowed(p, cpus_allowed); 3987 cpumask_and(new_mask, in_mask, cpus_allowed); 3988 3989 /* 3990 * Since bandwidth control happens on root_domain basis, 3991 * if admission test is enabled, we only admit -deadline 3992 * tasks allowed to run on all the CPUs in the task's 3993 * root_domain. 3994 */ 3995#ifdef CONFIG_SMP 3996 if (task_has_dl_policy(p)) { 3997 const struct cpumask *span = task_rq(p)->rd->span; 3998 3999 if (dl_bandwidth_enabled() && !cpumask_subset(span, new_mask)) { 4000 retval = -EBUSY; 4001 goto out_unlock; 4002 } 4003 } 4004#endif 4005again: 4006 retval = set_cpus_allowed_ptr(p, new_mask); 4007 4008 if (!retval) { 4009 cpuset_cpus_allowed(p, cpus_allowed); 4010 if (!cpumask_subset(new_mask, cpus_allowed)) { 4011 /* 4012 * We must have raced with a concurrent cpuset 4013 * update. Just reset the cpus_allowed to the 4014 * cpuset's cpus_allowed 4015 */ 4016 cpumask_copy(new_mask, cpus_allowed); 4017 goto again; 4018 } 4019 } 4020out_unlock: 4021 free_cpumask_var(new_mask); 4022out_free_cpus_allowed: 4023 free_cpumask_var(cpus_allowed); 4024out_put_task: 4025 put_task_struct(p); 4026 return retval; 4027} 4028 4029static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 4030 struct cpumask *new_mask) 4031{ 4032 if (len < cpumask_size()) 4033 cpumask_clear(new_mask); 4034 else if (len > cpumask_size()) 4035 len = cpumask_size(); 4036 4037 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; 4038} 4039 4040/** 4041 * sys_sched_setaffinity - set the cpu affinity of a process 4042 * @pid: pid of the process 4043 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 4044 * @user_mask_ptr: user-space pointer to the new cpu mask 4045 * 4046 * Return: 0 on success. An error code otherwise. 4047 */ 4048SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 4049 unsigned long __user *, user_mask_ptr) 4050{ 4051 cpumask_var_t new_mask; 4052 int retval; 4053 4054 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 4055 return -ENOMEM; 4056 4057 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 4058 if (retval == 0) 4059 retval = sched_setaffinity(pid, new_mask); 4060 free_cpumask_var(new_mask); 4061 return retval; 4062} 4063 4064long sched_getaffinity(pid_t pid, struct cpumask *mask) 4065{ 4066 struct task_struct *p; 4067 unsigned long flags; 4068 int retval; 4069 4070 rcu_read_lock(); 4071 4072 retval = -ESRCH; 4073 p = find_process_by_pid(pid); 4074 if (!p) 4075 goto out_unlock; 4076 4077 retval = security_task_getscheduler(p); 4078 if (retval) 4079 goto out_unlock; 4080 4081 raw_spin_lock_irqsave(&p->pi_lock, flags); 4082 cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); 4083 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4084 4085out_unlock: 4086 rcu_read_unlock(); 4087 4088 return retval; 4089} 4090 4091/** 4092 * sys_sched_getaffinity - get the cpu affinity of a process 4093 * @pid: pid of the process 4094 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 4095 * @user_mask_ptr: user-space pointer to hold the current cpu mask 4096 * 4097 * Return: 0 on success. An error code otherwise. 4098 */ 4099SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 4100 unsigned long __user *, user_mask_ptr) 4101{ 4102 int ret; 4103 cpumask_var_t mask; 4104 4105 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 4106 return -EINVAL; 4107 if (len & (sizeof(unsigned long)-1)) 4108 return -EINVAL; 4109 4110 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 4111 return -ENOMEM; 4112 4113 ret = sched_getaffinity(pid, mask); 4114 if (ret == 0) { 4115 size_t retlen = min_t(size_t, len, cpumask_size()); 4116 4117 if (copy_to_user(user_mask_ptr, mask, retlen)) 4118 ret = -EFAULT; 4119 else 4120 ret = retlen; 4121 } 4122 free_cpumask_var(mask); 4123 4124 return ret; 4125} 4126 4127/** 4128 * sys_sched_yield - yield the current processor to other threads. 4129 * 4130 * This function yields the current CPU to other tasks. If there are no 4131 * other threads running on this CPU then this function will return. 4132 * 4133 * Return: 0. 4134 */ 4135SYSCALL_DEFINE0(sched_yield) 4136{ 4137 struct rq *rq = this_rq_lock(); 4138 4139 schedstat_inc(rq, yld_count); 4140 current->sched_class->yield_task(rq); 4141 4142 /* 4143 * Since we are going to call schedule() anyway, there's 4144 * no need to preempt or enable interrupts: 4145 */ 4146 __release(rq->lock); 4147 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 4148 do_raw_spin_unlock(&rq->lock); 4149 sched_preempt_enable_no_resched(); 4150 4151 schedule(); 4152 4153 return 0; 4154} 4155 4156static void __cond_resched(void) 4157{ 4158 __preempt_count_add(PREEMPT_ACTIVE); 4159 __schedule(); 4160 __preempt_count_sub(PREEMPT_ACTIVE); 4161} 4162 4163int __sched _cond_resched(void) 4164{ 4165 rcu_cond_resched(); 4166 if (should_resched()) { 4167 __cond_resched(); 4168 return 1; 4169 } 4170 return 0; 4171} 4172EXPORT_SYMBOL(_cond_resched); 4173 4174/* 4175 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 4176 * call schedule, and on return reacquire the lock. 4177 * 4178 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level 4179 * operations here to prevent schedule() from being called twice (once via 4180 * spin_unlock(), once by hand). 4181 */ 4182int __cond_resched_lock(spinlock_t *lock) 4183{ 4184 bool need_rcu_resched = rcu_should_resched(); 4185 int resched = should_resched(); 4186 int ret = 0; 4187 4188 lockdep_assert_held(lock); 4189 4190 if (spin_needbreak(lock) || resched || need_rcu_resched) { 4191 spin_unlock(lock); 4192 if (resched) 4193 __cond_resched(); 4194 else if (unlikely(need_rcu_resched)) 4195 rcu_resched(); 4196 else 4197 cpu_relax(); 4198 ret = 1; 4199 spin_lock(lock); 4200 } 4201 return ret; 4202} 4203EXPORT_SYMBOL(__cond_resched_lock); 4204 4205int __sched __cond_resched_softirq(void) 4206{ 4207 BUG_ON(!in_softirq()); 4208 4209 rcu_cond_resched(); /* BH disabled OK, just recording QSes. */ 4210 if (should_resched()) { 4211 local_bh_enable(); 4212 __cond_resched(); 4213 local_bh_disable(); 4214 return 1; 4215 } 4216 return 0; 4217} 4218EXPORT_SYMBOL(__cond_resched_softirq); 4219 4220/** 4221 * yield - yield the current processor to other threads. 4222 * 4223 * Do not ever use this function, there's a 99% chance you're doing it wrong. 4224 * 4225 * The scheduler is at all times free to pick the calling task as the most 4226 * eligible task to run, if removing the yield() call from your code breaks 4227 * it, its already broken. 4228 * 4229 * Typical broken usage is: 4230 * 4231 * while (!event) 4232 * yield(); 4233 * 4234 * where one assumes that yield() will let 'the other' process run that will 4235 * make event true. If the current task is a SCHED_FIFO task that will never 4236 * happen. Never use yield() as a progress guarantee!! 4237 * 4238 * If you want to use yield() to wait for something, use wait_event(). 4239 * If you want to use yield() to be 'nice' for others, use cond_resched(). 4240 * If you still want to use yield(), do not! 4241 */ 4242void __sched yield(void) 4243{ 4244 set_current_state(TASK_RUNNING); 4245 sys_sched_yield(); 4246} 4247EXPORT_SYMBOL(yield); 4248 4249/** 4250 * yield_to - yield the current processor to another thread in 4251 * your thread group, or accelerate that thread toward the 4252 * processor it's on. 4253 * @p: target task 4254 * @preempt: whether task preemption is allowed or not 4255 * 4256 * It's the caller's job to ensure that the target task struct 4257 * can't go away on us before we can do any checks. 4258 * 4259 * Return: 4260 * true (>0) if we indeed boosted the target task. 4261 * false (0) if we failed to boost the target. 4262 * -ESRCH if there's no task to yield to. 4263 */ 4264int __sched yield_to(struct task_struct *p, bool preempt) 4265{ 4266 struct task_struct *curr = current; 4267 struct rq *rq, *p_rq; 4268 unsigned long flags; 4269 int yielded = 0; 4270 4271 local_irq_save(flags); 4272 rq = this_rq(); 4273 4274again: 4275 p_rq = task_rq(p); 4276 /* 4277 * If we're the only runnable task on the rq and target rq also 4278 * has only one task, there's absolutely no point in yielding. 4279 */ 4280 if (rq->nr_running == 1 && p_rq->nr_running == 1) { 4281 yielded = -ESRCH; 4282 goto out_irq; 4283 } 4284 4285 double_rq_lock(rq, p_rq); 4286 if (task_rq(p) != p_rq) { 4287 double_rq_unlock(rq, p_rq); 4288 goto again; 4289 } 4290 4291 if (!curr->sched_class->yield_to_task) 4292 goto out_unlock; 4293 4294 if (curr->sched_class != p->sched_class) 4295 goto out_unlock; 4296 4297 if (task_running(p_rq, p) || p->state) 4298 goto out_unlock; 4299 4300 yielded = curr->sched_class->yield_to_task(rq, p, preempt); 4301 if (yielded) { 4302 schedstat_inc(rq, yld_count); 4303 /* 4304 * Make p's CPU reschedule; pick_next_entity takes care of 4305 * fairness. 4306 */ 4307 if (preempt && rq != p_rq) 4308 resched_curr(p_rq); 4309 } 4310 4311out_unlock: 4312 double_rq_unlock(rq, p_rq); 4313out_irq: 4314 local_irq_restore(flags); 4315 4316 if (yielded > 0) 4317 schedule(); 4318 4319 return yielded; 4320} 4321EXPORT_SYMBOL_GPL(yield_to); 4322 4323/* 4324 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 4325 * that process accounting knows that this is a task in IO wait state. 4326 */ 4327void __sched io_schedule(void) 4328{ 4329 struct rq *rq = raw_rq(); 4330 4331 delayacct_blkio_start(); 4332 atomic_inc(&rq->nr_iowait); 4333 blk_flush_plug(current); 4334 current->in_iowait = 1; 4335 schedule(); 4336 current->in_iowait = 0; 4337 atomic_dec(&rq->nr_iowait); 4338 delayacct_blkio_end(); 4339} 4340EXPORT_SYMBOL(io_schedule); 4341 4342long __sched io_schedule_timeout(long timeout) 4343{ 4344 struct rq *rq = raw_rq(); 4345 long ret; 4346 4347 delayacct_blkio_start(); 4348 atomic_inc(&rq->nr_iowait); 4349 blk_flush_plug(current); 4350 current->in_iowait = 1; 4351 ret = schedule_timeout(timeout); 4352 current->in_iowait = 0; 4353 atomic_dec(&rq->nr_iowait); 4354 delayacct_blkio_end(); 4355 return ret; 4356} 4357 4358/** 4359 * sys_sched_get_priority_max - return maximum RT priority. 4360 * @policy: scheduling class. 4361 * 4362 * Return: On success, this syscall returns the maximum 4363 * rt_priority that can be used by a given scheduling class. 4364 * On failure, a negative error code is returned. 4365 */ 4366SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 4367{ 4368 int ret = -EINVAL; 4369 4370 switch (policy) { 4371 case SCHED_FIFO: 4372 case SCHED_RR: 4373 ret = MAX_USER_RT_PRIO-1; 4374 break; 4375 case SCHED_DEADLINE: 4376 case SCHED_NORMAL: 4377 case SCHED_BATCH: 4378 case SCHED_IDLE: 4379 ret = 0; 4380 break; 4381 } 4382 return ret; 4383} 4384 4385/** 4386 * sys_sched_get_priority_min - return minimum RT priority. 4387 * @policy: scheduling class. 4388 * 4389 * Return: On success, this syscall returns the minimum 4390 * rt_priority that can be used by a given scheduling class. 4391 * On failure, a negative error code is returned. 4392 */ 4393SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 4394{ 4395 int ret = -EINVAL; 4396 4397 switch (policy) { 4398 case SCHED_FIFO: 4399 case SCHED_RR: 4400 ret = 1; 4401 break; 4402 case SCHED_DEADLINE: 4403 case SCHED_NORMAL: 4404 case SCHED_BATCH: 4405 case SCHED_IDLE: 4406 ret = 0; 4407 } 4408 return ret; 4409} 4410 4411/** 4412 * sys_sched_rr_get_interval - return the default timeslice of a process. 4413 * @pid: pid of the process. 4414 * @interval: userspace pointer to the timeslice value. 4415 * 4416 * this syscall writes the default timeslice value of a given process 4417 * into the user-space timespec buffer. A value of '0' means infinity. 4418 * 4419 * Return: On success, 0 and the timeslice is in @interval. Otherwise, 4420 * an error code. 4421 */ 4422SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 4423 struct timespec __user *, interval) 4424{ 4425 struct task_struct *p; 4426 unsigned int time_slice; 4427 unsigned long flags; 4428 struct rq *rq; 4429 int retval; 4430 struct timespec t; 4431 4432 if (pid < 0) 4433 return -EINVAL; 4434 4435 retval = -ESRCH; 4436 rcu_read_lock(); 4437 p = find_process_by_pid(pid); 4438 if (!p) 4439 goto out_unlock; 4440 4441 retval = security_task_getscheduler(p); 4442 if (retval) 4443 goto out_unlock; 4444 4445 rq = task_rq_lock(p, &flags); 4446 time_slice = 0; 4447 if (p->sched_class->get_rr_interval) 4448 time_slice = p->sched_class->get_rr_interval(rq, p); 4449 task_rq_unlock(rq, p, &flags); 4450 4451 rcu_read_unlock(); 4452 jiffies_to_timespec(time_slice, &t); 4453 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; 4454 return retval; 4455 4456out_unlock: 4457 rcu_read_unlock(); 4458 return retval; 4459} 4460 4461static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; 4462 4463void sched_show_task(struct task_struct *p) 4464{ 4465 unsigned long free = 0; 4466 int ppid; 4467 unsigned state; 4468 4469 state = p->state ? __ffs(p->state) + 1 : 0; 4470 printk(KERN_INFO "%-15.15s %c", p->comm, 4471 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); 4472#if BITS_PER_LONG == 32 4473 if (state == TASK_RUNNING) 4474 printk(KERN_CONT " running "); 4475 else 4476 printk(KERN_CONT " %08lx ", thread_saved_pc(p)); 4477#else 4478 if (state == TASK_RUNNING) 4479 printk(KERN_CONT " running task "); 4480 else 4481 printk(KERN_CONT " %016lx ", thread_saved_pc(p)); 4482#endif 4483#ifdef CONFIG_DEBUG_STACK_USAGE 4484 free = stack_not_used(p); 4485#endif 4486 rcu_read_lock(); 4487 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 4488 rcu_read_unlock(); 4489 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, 4490 task_pid_nr(p), ppid, 4491 (unsigned long)task_thread_info(p)->flags); 4492 4493 print_worker_info(KERN_INFO, p); 4494 show_stack(p, NULL); 4495} 4496 4497void show_state_filter(unsigned long state_filter) 4498{ 4499 struct task_struct *g, *p; 4500 4501#if BITS_PER_LONG == 32 4502 printk(KERN_INFO 4503 " task PC stack pid father\n"); 4504#else 4505 printk(KERN_INFO 4506 " task PC stack pid father\n"); 4507#endif 4508 rcu_read_lock(); 4509 do_each_thread(g, p) { 4510 /* 4511 * reset the NMI-timeout, listing all files on a slow 4512 * console might take a lot of time: 4513 */ 4514 touch_nmi_watchdog(); 4515 if (!state_filter || (p->state & state_filter)) 4516 sched_show_task(p); 4517 } while_each_thread(g, p); 4518 4519 touch_all_softlockup_watchdogs(); 4520 4521#ifdef CONFIG_SCHED_DEBUG 4522 sysrq_sched_debug_show(); 4523#endif 4524 rcu_read_unlock(); 4525 /* 4526 * Only show locks if all tasks are dumped: 4527 */ 4528 if (!state_filter) 4529 debug_show_all_locks(); 4530} 4531 4532void init_idle_bootup_task(struct task_struct *idle) 4533{ 4534 idle->sched_class = &idle_sched_class; 4535} 4536 4537/** 4538 * init_idle - set up an idle thread for a given CPU 4539 * @idle: task in question 4540 * @cpu: cpu the idle task belongs to 4541 * 4542 * NOTE: this function does not set the idle thread's NEED_RESCHED 4543 * flag, to make booting more robust. 4544 */ 4545void init_idle(struct task_struct *idle, int cpu) 4546{ 4547 struct rq *rq = cpu_rq(cpu); 4548 unsigned long flags; 4549 4550 raw_spin_lock_irqsave(&rq->lock, flags); 4551 4552 __sched_fork(0, idle); 4553 idle->state = TASK_RUNNING; 4554 idle->se.exec_start = sched_clock(); 4555 4556 do_set_cpus_allowed(idle, cpumask_of(cpu)); 4557 /* 4558 * We're having a chicken and egg problem, even though we are 4559 * holding rq->lock, the cpu isn't yet set to this cpu so the 4560 * lockdep check in task_group() will fail. 4561 * 4562 * Similar case to sched_fork(). / Alternatively we could 4563 * use task_rq_lock() here and obtain the other rq->lock. 4564 * 4565 * Silence PROVE_RCU 4566 */ 4567 rcu_read_lock(); 4568 __set_task_cpu(idle, cpu); 4569 rcu_read_unlock(); 4570 4571 rq->curr = rq->idle = idle; 4572 idle->on_rq = 1; 4573#if defined(CONFIG_SMP) 4574 idle->on_cpu = 1; 4575#endif 4576 raw_spin_unlock_irqrestore(&rq->lock, flags); 4577 4578 /* Set the preempt count _outside_ the spinlocks! */ 4579 init_idle_preempt_count(idle, cpu); 4580 4581 /* 4582 * The idle tasks have their own, simple scheduling class: 4583 */ 4584 idle->sched_class = &idle_sched_class; 4585 ftrace_graph_init_idle_task(idle, cpu); 4586 vtime_init_idle(idle, cpu); 4587#if defined(CONFIG_SMP) 4588 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 4589#endif 4590} 4591 4592#ifdef CONFIG_SMP 4593void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 4594{ 4595 if (p->sched_class && p->sched_class->set_cpus_allowed) 4596 p->sched_class->set_cpus_allowed(p, new_mask); 4597 4598 cpumask_copy(&p->cpus_allowed, new_mask); 4599 p->nr_cpus_allowed = cpumask_weight(new_mask); 4600} 4601 4602/* 4603 * This is how migration works: 4604 * 4605 * 1) we invoke migration_cpu_stop() on the target CPU using 4606 * stop_one_cpu(). 4607 * 2) stopper starts to run (implicitly forcing the migrated thread 4608 * off the CPU) 4609 * 3) it checks whether the migrated task is still in the wrong runqueue. 4610 * 4) if it's in the wrong runqueue then the migration thread removes 4611 * it and puts it into the right queue. 4612 * 5) stopper completes and stop_one_cpu() returns and the migration 4613 * is done. 4614 */ 4615 4616/* 4617 * Change a given task's CPU affinity. Migrate the thread to a 4618 * proper CPU and schedule it away if the CPU it's executing on 4619 * is removed from the allowed bitmask. 4620 * 4621 * NOTE: the caller must have a valid reference to the task, the 4622 * task must not exit() & deallocate itself prematurely. The 4623 * call is not atomic; no spinlocks may be held. 4624 */ 4625int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 4626{ 4627 unsigned long flags; 4628 struct rq *rq; 4629 unsigned int dest_cpu; 4630 int ret = 0; 4631 4632 rq = task_rq_lock(p, &flags); 4633 4634 if (cpumask_equal(&p->cpus_allowed, new_mask)) 4635 goto out; 4636 4637 if (!cpumask_intersects(new_mask, cpu_active_mask)) { 4638 ret = -EINVAL; 4639 goto out; 4640 } 4641 4642 do_set_cpus_allowed(p, new_mask); 4643 4644 /* Can the task run on the task's current CPU? If so, we're done */ 4645 if (cpumask_test_cpu(task_cpu(p), new_mask)) 4646 goto out; 4647 4648 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); 4649 if (p->on_rq) { 4650 struct migration_arg arg = { p, dest_cpu }; 4651 /* Need help from migration thread: drop lock and wait. */ 4652 task_rq_unlock(rq, p, &flags); 4653 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); 4654 tlb_migrate_finish(p->mm); 4655 return 0; 4656 } 4657out: 4658 task_rq_unlock(rq, p, &flags); 4659 4660 return ret; 4661} 4662EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); 4663 4664/* 4665 * Move (not current) task off this cpu, onto dest cpu. We're doing 4666 * this because either it can't run here any more (set_cpus_allowed() 4667 * away from this CPU, or CPU going down), or because we're 4668 * attempting to rebalance this task on exec (sched_exec). 4669 * 4670 * So we race with normal scheduler movements, but that's OK, as long 4671 * as the task is no longer on this CPU. 4672 * 4673 * Returns non-zero if task was successfully migrated. 4674 */ 4675static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) 4676{ 4677 struct rq *rq_dest, *rq_src; 4678 int ret = 0; 4679 4680 if (unlikely(!cpu_active(dest_cpu))) 4681 return ret; 4682 4683 rq_src = cpu_rq(src_cpu); 4684 rq_dest = cpu_rq(dest_cpu); 4685 4686 raw_spin_lock(&p->pi_lock); 4687 double_rq_lock(rq_src, rq_dest); 4688 /* Already moved. */ 4689 if (task_cpu(p) != src_cpu) 4690 goto done; 4691 /* Affinity changed (again). */ 4692 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) 4693 goto fail; 4694 4695 /* 4696 * If we're not on a rq, the next wake-up will ensure we're 4697 * placed properly. 4698 */ 4699 if (p->on_rq) { 4700 dequeue_task(rq_src, p, 0); 4701 set_task_cpu(p, dest_cpu); 4702 enqueue_task(rq_dest, p, 0); 4703 check_preempt_curr(rq_dest, p, 0); 4704 } 4705done: 4706 ret = 1; 4707fail: 4708 double_rq_unlock(rq_src, rq_dest); 4709 raw_spin_unlock(&p->pi_lock); 4710 return ret; 4711} 4712 4713#ifdef CONFIG_NUMA_BALANCING 4714/* Migrate current task p to target_cpu */ 4715int migrate_task_to(struct task_struct *p, int target_cpu) 4716{ 4717 struct migration_arg arg = { p, target_cpu }; 4718 int curr_cpu = task_cpu(p); 4719 4720 if (curr_cpu == target_cpu) 4721 return 0; 4722 4723 if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p))) 4724 return -EINVAL; 4725 4726 /* TODO: This is not properly updating schedstats */ 4727 4728 trace_sched_move_numa(p, curr_cpu, target_cpu); 4729 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 4730} 4731 4732/* 4733 * Requeue a task on a given node and accurately track the number of NUMA 4734 * tasks on the runqueues 4735 */ 4736void sched_setnuma(struct task_struct *p, int nid) 4737{ 4738 struct rq *rq; 4739 unsigned long flags; 4740 bool on_rq, running; 4741 4742 rq = task_rq_lock(p, &flags); 4743 on_rq = p->on_rq; 4744 running = task_current(rq, p); 4745 4746 if (on_rq) 4747 dequeue_task(rq, p, 0); 4748 if (running) 4749 p->sched_class->put_prev_task(rq, p); 4750 4751 p->numa_preferred_nid = nid; 4752 4753 if (running) 4754 p->sched_class->set_curr_task(rq); 4755 if (on_rq) 4756 enqueue_task(rq, p, 0); 4757 task_rq_unlock(rq, p, &flags); 4758} 4759#endif 4760 4761/* 4762 * migration_cpu_stop - this will be executed by a highprio stopper thread 4763 * and performs thread migration by bumping thread off CPU then 4764 * 'pushing' onto another runqueue. 4765 */ 4766static int migration_cpu_stop(void *data) 4767{ 4768 struct migration_arg *arg = data; 4769 4770 /* 4771 * The original target cpu might have gone down and we might 4772 * be on another cpu but it doesn't matter. 4773 */ 4774 local_irq_disable(); 4775 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu); 4776 local_irq_enable(); 4777 return 0; 4778} 4779 4780#ifdef CONFIG_HOTPLUG_CPU 4781 4782/* 4783 * Ensures that the idle task is using init_mm right before its cpu goes 4784 * offline. 4785 */ 4786void idle_task_exit(void) 4787{ 4788 struct mm_struct *mm = current->active_mm; 4789 4790 BUG_ON(cpu_online(smp_processor_id())); 4791 4792 if (mm != &init_mm) { 4793 switch_mm(mm, &init_mm, current); 4794 finish_arch_post_lock_switch(); 4795 } 4796 mmdrop(mm); 4797} 4798 4799/* 4800 * Since this CPU is going 'away' for a while, fold any nr_active delta 4801 * we might have. Assumes we're called after migrate_tasks() so that the 4802 * nr_active count is stable. 4803 * 4804 * Also see the comment "Global load-average calculations". 4805 */ 4806static void calc_load_migrate(struct rq *rq) 4807{ 4808 long delta = calc_load_fold_active(rq); 4809 if (delta) 4810 atomic_long_add(delta, &calc_load_tasks); 4811} 4812 4813static void put_prev_task_fake(struct rq *rq, struct task_struct *prev) 4814{ 4815} 4816 4817static const struct sched_class fake_sched_class = { 4818 .put_prev_task = put_prev_task_fake, 4819}; 4820 4821static struct task_struct fake_task = { 4822 /* 4823 * Avoid pull_{rt,dl}_task() 4824 */ 4825 .prio = MAX_PRIO + 1, 4826 .sched_class = &fake_sched_class, 4827}; 4828 4829/* 4830 * Migrate all tasks from the rq, sleeping tasks will be migrated by 4831 * try_to_wake_up()->select_task_rq(). 4832 * 4833 * Called with rq->lock held even though we'er in stop_machine() and 4834 * there's no concurrency possible, we hold the required locks anyway 4835 * because of lock validation efforts. 4836 */ 4837static void migrate_tasks(unsigned int dead_cpu) 4838{ 4839 struct rq *rq = cpu_rq(dead_cpu); 4840 struct task_struct *next, *stop = rq->stop; 4841 int dest_cpu; 4842 4843 /* 4844 * Fudge the rq selection such that the below task selection loop 4845 * doesn't get stuck on the currently eligible stop task. 4846 * 4847 * We're currently inside stop_machine() and the rq is either stuck 4848 * in the stop_machine_cpu_stop() loop, or we're executing this code, 4849 * either way we should never end up calling schedule() until we're 4850 * done here. 4851 */ 4852 rq->stop = NULL; 4853 4854 /* 4855 * put_prev_task() and pick_next_task() sched 4856 * class method both need to have an up-to-date 4857 * value of rq->clock[_task] 4858 */ 4859 update_rq_clock(rq); 4860 4861 for ( ; ; ) { 4862 /* 4863 * There's this thread running, bail when that's the only 4864 * remaining thread. 4865 */ 4866 if (rq->nr_running == 1) 4867 break; 4868 4869 next = pick_next_task(rq, &fake_task); 4870 BUG_ON(!next); 4871 next->sched_class->put_prev_task(rq, next); 4872 4873 /* Find suitable destination for @next, with force if needed. */ 4874 dest_cpu = select_fallback_rq(dead_cpu, next); 4875 raw_spin_unlock(&rq->lock); 4876 4877 __migrate_task(next, dead_cpu, dest_cpu); 4878 4879 raw_spin_lock(&rq->lock); 4880 } 4881 4882 rq->stop = stop; 4883} 4884 4885#endif /* CONFIG_HOTPLUG_CPU */ 4886 4887#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) 4888 4889static struct ctl_table sd_ctl_dir[] = { 4890 { 4891 .procname = "sched_domain", 4892 .mode = 0555, 4893 }, 4894 {} 4895}; 4896 4897static struct ctl_table sd_ctl_root[] = { 4898 { 4899 .procname = "kernel", 4900 .mode = 0555, 4901 .child = sd_ctl_dir, 4902 }, 4903 {} 4904}; 4905 4906static struct ctl_table *sd_alloc_ctl_entry(int n) 4907{ 4908 struct ctl_table *entry = 4909 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); 4910 4911 return entry; 4912} 4913 4914static void sd_free_ctl_entry(struct ctl_table **tablep) 4915{ 4916 struct ctl_table *entry; 4917 4918 /* 4919 * In the intermediate directories, both the child directory and 4920 * procname are dynamically allocated and could fail but the mode 4921 * will always be set. In the lowest directory the names are 4922 * static strings and all have proc handlers. 4923 */ 4924 for (entry = *tablep; entry->mode; entry++) { 4925 if (entry->child) 4926 sd_free_ctl_entry(&entry->child); 4927 if (entry->proc_handler == NULL) 4928 kfree(entry->procname); 4929 } 4930 4931 kfree(*tablep); 4932 *tablep = NULL; 4933} 4934 4935static int min_load_idx = 0; 4936static int max_load_idx = CPU_LOAD_IDX_MAX-1; 4937 4938static void 4939set_table_entry(struct ctl_table *entry, 4940 const char *procname, void *data, int maxlen, 4941 umode_t mode, proc_handler *proc_handler, 4942 bool load_idx) 4943{ 4944 entry->procname = procname; 4945 entry->data = data; 4946 entry->maxlen = maxlen; 4947 entry->mode = mode; 4948 entry->proc_handler = proc_handler; 4949 4950 if (load_idx) { 4951 entry->extra1 = &min_load_idx; 4952 entry->extra2 = &max_load_idx; 4953 } 4954} 4955 4956static struct ctl_table * 4957sd_alloc_ctl_domain_table(struct sched_domain *sd) 4958{ 4959 struct ctl_table *table = sd_alloc_ctl_entry(14); 4960 4961 if (table == NULL) 4962 return NULL; 4963 4964 set_table_entry(&table[0], "min_interval", &sd->min_interval, 4965 sizeof(long), 0644, proc_doulongvec_minmax, false); 4966 set_table_entry(&table[1], "max_interval", &sd->max_interval, 4967 sizeof(long), 0644, proc_doulongvec_minmax, false); 4968 set_table_entry(&table[2], "busy_idx", &sd->busy_idx, 4969 sizeof(int), 0644, proc_dointvec_minmax, true); 4970 set_table_entry(&table[3], "idle_idx", &sd->idle_idx, 4971 sizeof(int), 0644, proc_dointvec_minmax, true); 4972 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, 4973 sizeof(int), 0644, proc_dointvec_minmax, true); 4974 set_table_entry(&table[5], "wake_idx", &sd->wake_idx, 4975 sizeof(int), 0644, proc_dointvec_minmax, true); 4976 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, 4977 sizeof(int), 0644, proc_dointvec_minmax, true); 4978 set_table_entry(&table[7], "busy_factor", &sd->busy_factor, 4979 sizeof(int), 0644, proc_dointvec_minmax, false); 4980 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, 4981 sizeof(int), 0644, proc_dointvec_minmax, false); 4982 set_table_entry(&table[9], "cache_nice_tries", 4983 &sd->cache_nice_tries, 4984 sizeof(int), 0644, proc_dointvec_minmax, false); 4985 set_table_entry(&table[10], "flags", &sd->flags, 4986 sizeof(int), 0644, proc_dointvec_minmax, false); 4987 set_table_entry(&table[11], "max_newidle_lb_cost", 4988 &sd->max_newidle_lb_cost, 4989 sizeof(long), 0644, proc_doulongvec_minmax, false); 4990 set_table_entry(&table[12], "name", sd->name, 4991 CORENAME_MAX_SIZE, 0444, proc_dostring, false); 4992 /* &table[13] is terminator */ 4993 4994 return table; 4995} 4996 4997static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu) 4998{ 4999 struct ctl_table *entry, *table; 5000 struct sched_domain *sd; 5001 int domain_num = 0, i; 5002 char buf[32]; 5003 5004 for_each_domain(cpu, sd) 5005 domain_num++; 5006 entry = table = sd_alloc_ctl_entry(domain_num + 1); 5007 if (table == NULL) 5008 return NULL; 5009 5010 i = 0; 5011 for_each_domain(cpu, sd) { 5012 snprintf(buf, 32, "domain%d", i); 5013 entry->procname = kstrdup(buf, GFP_KERNEL); 5014 entry->mode = 0555; 5015 entry->child = sd_alloc_ctl_domain_table(sd); 5016 entry++; 5017 i++; 5018 } 5019 return table; 5020} 5021 5022static struct ctl_table_header *sd_sysctl_header; 5023static void register_sched_domain_sysctl(void) 5024{ 5025 int i, cpu_num = num_possible_cpus(); 5026 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); 5027 char buf[32]; 5028 5029 WARN_ON(sd_ctl_dir[0].child); 5030 sd_ctl_dir[0].child = entry; 5031 5032 if (entry == NULL) 5033 return; 5034 5035 for_each_possible_cpu(i) { 5036 snprintf(buf, 32, "cpu%d", i); 5037 entry->procname = kstrdup(buf, GFP_KERNEL); 5038 entry->mode = 0555; 5039 entry->child = sd_alloc_ctl_cpu_table(i); 5040 entry++; 5041 } 5042 5043 WARN_ON(sd_sysctl_header); 5044 sd_sysctl_header = register_sysctl_table(sd_ctl_root); 5045} 5046 5047/* may be called multiple times per register */ 5048static void unregister_sched_domain_sysctl(void) 5049{ 5050 if (sd_sysctl_header) 5051 unregister_sysctl_table(sd_sysctl_header); 5052 sd_sysctl_header = NULL; 5053 if (sd_ctl_dir[0].child) 5054 sd_free_ctl_entry(&sd_ctl_dir[0].child); 5055} 5056#else 5057static void register_sched_domain_sysctl(void) 5058{ 5059} 5060static void unregister_sched_domain_sysctl(void) 5061{ 5062} 5063#endif 5064 5065static void set_rq_online(struct rq *rq) 5066{ 5067 if (!rq->online) { 5068 const struct sched_class *class; 5069 5070 cpumask_set_cpu(rq->cpu, rq->rd->online); 5071 rq->online = 1; 5072 5073 for_each_class(class) { 5074 if (class->rq_online) 5075 class->rq_online(rq); 5076 } 5077 } 5078} 5079 5080static void set_rq_offline(struct rq *rq) 5081{ 5082 if (rq->online) { 5083 const struct sched_class *class; 5084 5085 for_each_class(class) { 5086 if (class->rq_offline) 5087 class->rq_offline(rq); 5088 } 5089 5090 cpumask_clear_cpu(rq->cpu, rq->rd->online); 5091 rq->online = 0; 5092 } 5093} 5094 5095/* 5096 * migration_call - callback that gets triggered when a CPU is added. 5097 * Here we can start up the necessary migration thread for the new CPU. 5098 */ 5099static int 5100migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) 5101{ 5102 int cpu = (long)hcpu; 5103 unsigned long flags; 5104 struct rq *rq = cpu_rq(cpu); 5105 5106 switch (action & ~CPU_TASKS_FROZEN) { 5107 5108 case CPU_UP_PREPARE: 5109 rq->calc_load_update = calc_load_update; 5110 break; 5111 5112 case CPU_ONLINE: 5113 /* Update our root-domain */ 5114 raw_spin_lock_irqsave(&rq->lock, flags); 5115 if (rq->rd) { 5116 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5117 5118 set_rq_online(rq); 5119 } 5120 raw_spin_unlock_irqrestore(&rq->lock, flags); 5121 break; 5122 5123#ifdef CONFIG_HOTPLUG_CPU 5124 case CPU_DYING: 5125 sched_ttwu_pending(); 5126 /* Update our root-domain */ 5127 raw_spin_lock_irqsave(&rq->lock, flags); 5128 if (rq->rd) { 5129 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5130 set_rq_offline(rq); 5131 } 5132 migrate_tasks(cpu); 5133 BUG_ON(rq->nr_running != 1); /* the migration thread */ 5134 raw_spin_unlock_irqrestore(&rq->lock, flags); 5135 break; 5136 5137 case CPU_DEAD: 5138 calc_load_migrate(rq); 5139 break; 5140#endif 5141 } 5142 5143 update_max_interval(); 5144 5145 return NOTIFY_OK; 5146} 5147 5148/* 5149 * Register at high priority so that task migration (migrate_all_tasks) 5150 * happens before everything else. This has to be lower priority than 5151 * the notifier in the perf_event subsystem, though. 5152 */ 5153static struct notifier_block migration_notifier = { 5154 .notifier_call = migration_call, 5155 .priority = CPU_PRI_MIGRATION, 5156}; 5157 5158static void __cpuinit set_cpu_rq_start_time(void) 5159{ 5160 int cpu = smp_processor_id(); 5161 struct rq *rq = cpu_rq(cpu); 5162 rq->age_stamp = sched_clock_cpu(cpu); 5163} 5164 5165static int sched_cpu_active(struct notifier_block *nfb, 5166 unsigned long action, void *hcpu) 5167{ 5168 switch (action & ~CPU_TASKS_FROZEN) { 5169 case CPU_STARTING: 5170 set_cpu_rq_start_time(); 5171 return NOTIFY_OK; 5172 case CPU_DOWN_FAILED: 5173 set_cpu_active((long)hcpu, true); 5174 return NOTIFY_OK; 5175 default: 5176 return NOTIFY_DONE; 5177 } 5178} 5179 5180static int sched_cpu_inactive(struct notifier_block *nfb, 5181 unsigned long action, void *hcpu) 5182{ 5183 unsigned long flags; 5184 long cpu = (long)hcpu; 5185 5186 switch (action & ~CPU_TASKS_FROZEN) { 5187 case CPU_DOWN_PREPARE: 5188 set_cpu_active(cpu, false); 5189 5190 /* explicitly allow suspend */ 5191 if (!(action & CPU_TASKS_FROZEN)) { 5192 struct dl_bw *dl_b = dl_bw_of(cpu); 5193 bool overflow; 5194 int cpus; 5195 5196 raw_spin_lock_irqsave(&dl_b->lock, flags); 5197 cpus = dl_bw_cpus(cpu); 5198 overflow = __dl_overflow(dl_b, cpus, 0, 0); 5199 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 5200 5201 if (overflow) 5202 return notifier_from_errno(-EBUSY); 5203 } 5204 return NOTIFY_OK; 5205 } 5206 5207 return NOTIFY_DONE; 5208} 5209 5210static int __init migration_init(void) 5211{ 5212 void *cpu = (void *)(long)smp_processor_id(); 5213 int err; 5214 5215 /* Initialize migration for the boot CPU */ 5216 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); 5217 BUG_ON(err == NOTIFY_BAD); 5218 migration_call(&migration_notifier, CPU_ONLINE, cpu); 5219 register_cpu_notifier(&migration_notifier); 5220 5221 /* Register cpu active notifiers */ 5222 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); 5223 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); 5224 5225 return 0; 5226} 5227early_initcall(migration_init); 5228#endif 5229 5230#ifdef CONFIG_SMP 5231 5232static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ 5233 5234#ifdef CONFIG_SCHED_DEBUG 5235 5236static __read_mostly int sched_debug_enabled; 5237 5238static int __init sched_debug_setup(char *str) 5239{ 5240 sched_debug_enabled = 1; 5241 5242 return 0; 5243} 5244early_param("sched_debug", sched_debug_setup); 5245 5246static inline bool sched_debug(void) 5247{ 5248 return sched_debug_enabled; 5249} 5250 5251static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 5252 struct cpumask *groupmask) 5253{ 5254 struct sched_group *group = sd->groups; 5255 char str[256]; 5256 5257 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd)); 5258 cpumask_clear(groupmask); 5259 5260 printk(KERN_DEBUG "%*s domain %d: ", level, "", level); 5261 5262 if (!(sd->flags & SD_LOAD_BALANCE)) { 5263 printk("does not load-balance\n"); 5264 if (sd->parent) 5265 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" 5266 " has parent"); 5267 return -1; 5268 } 5269 5270 printk(KERN_CONT "span %s level %s\n", str, sd->name); 5271 5272 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { 5273 printk(KERN_ERR "ERROR: domain->span does not contain " 5274 "CPU%d\n", cpu); 5275 } 5276 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { 5277 printk(KERN_ERR "ERROR: domain->groups does not contain" 5278 " CPU%d\n", cpu); 5279 } 5280 5281 printk(KERN_DEBUG "%*s groups:", level + 1, ""); 5282 do { 5283 if (!group) { 5284 printk("\n"); 5285 printk(KERN_ERR "ERROR: group is NULL\n"); 5286 break; 5287 } 5288 5289 /* 5290 * Even though we initialize ->capacity to something semi-sane, 5291 * we leave capacity_orig unset. This allows us to detect if 5292 * domain iteration is still funny without causing /0 traps. 5293 */ 5294 if (!group->sgc->capacity_orig) { 5295 printk(KERN_CONT "\n"); 5296 printk(KERN_ERR "ERROR: domain->cpu_capacity not set\n"); 5297 break; 5298 } 5299 5300 if (!cpumask_weight(sched_group_cpus(group))) { 5301 printk(KERN_CONT "\n"); 5302 printk(KERN_ERR "ERROR: empty group\n"); 5303 break; 5304 } 5305 5306 if (!(sd->flags & SD_OVERLAP) && 5307 cpumask_intersects(groupmask, sched_group_cpus(group))) { 5308 printk(KERN_CONT "\n"); 5309 printk(KERN_ERR "ERROR: repeated CPUs\n"); 5310 break; 5311 } 5312 5313 cpumask_or(groupmask, groupmask, sched_group_cpus(group)); 5314 5315 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); 5316 5317 printk(KERN_CONT " %s", str); 5318 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) { 5319 printk(KERN_CONT " (cpu_capacity = %d)", 5320 group->sgc->capacity); 5321 } 5322 5323 group = group->next; 5324 } while (group != sd->groups); 5325 printk(KERN_CONT "\n"); 5326 5327 if (!cpumask_equal(sched_domain_span(sd), groupmask)) 5328 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); 5329 5330 if (sd->parent && 5331 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) 5332 printk(KERN_ERR "ERROR: parent span is not a superset " 5333 "of domain->span\n"); 5334 return 0; 5335} 5336 5337static void sched_domain_debug(struct sched_domain *sd, int cpu) 5338{ 5339 int level = 0; 5340 5341 if (!sched_debug_enabled) 5342 return; 5343 5344 if (!sd) { 5345 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); 5346 return; 5347 } 5348 5349 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); 5350 5351 for (;;) { 5352 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) 5353 break; 5354 level++; 5355 sd = sd->parent; 5356 if (!sd) 5357 break; 5358 } 5359} 5360#else /* !CONFIG_SCHED_DEBUG */ 5361# define sched_domain_debug(sd, cpu) do { } while (0) 5362static inline bool sched_debug(void) 5363{ 5364 return false; 5365} 5366#endif /* CONFIG_SCHED_DEBUG */ 5367 5368static int sd_degenerate(struct sched_domain *sd) 5369{ 5370 if (cpumask_weight(sched_domain_span(sd)) == 1) 5371 return 1; 5372 5373 /* Following flags need at least 2 groups */ 5374 if (sd->flags & (SD_LOAD_BALANCE | 5375 SD_BALANCE_NEWIDLE | 5376 SD_BALANCE_FORK | 5377 SD_BALANCE_EXEC | 5378 SD_SHARE_CPUCAPACITY | 5379 SD_SHARE_PKG_RESOURCES | 5380 SD_SHARE_POWERDOMAIN)) { 5381 if (sd->groups != sd->groups->next) 5382 return 0; 5383 } 5384 5385 /* Following flags don't use groups */ 5386 if (sd->flags & (SD_WAKE_AFFINE)) 5387 return 0; 5388 5389 return 1; 5390} 5391 5392static int 5393sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) 5394{ 5395 unsigned long cflags = sd->flags, pflags = parent->flags; 5396 5397 if (sd_degenerate(parent)) 5398 return 1; 5399 5400 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) 5401 return 0; 5402 5403 /* Flags needing groups don't count if only 1 group in parent */ 5404 if (parent->groups == parent->groups->next) { 5405 pflags &= ~(SD_LOAD_BALANCE | 5406 SD_BALANCE_NEWIDLE | 5407 SD_BALANCE_FORK | 5408 SD_BALANCE_EXEC | 5409 SD_SHARE_CPUCAPACITY | 5410 SD_SHARE_PKG_RESOURCES | 5411 SD_PREFER_SIBLING | 5412 SD_SHARE_POWERDOMAIN); 5413 if (nr_node_ids == 1) 5414 pflags &= ~SD_SERIALIZE; 5415 } 5416 if (~cflags & pflags) 5417 return 0; 5418 5419 return 1; 5420} 5421 5422static void free_rootdomain(struct rcu_head *rcu) 5423{ 5424 struct root_domain *rd = container_of(rcu, struct root_domain, rcu); 5425 5426 cpupri_cleanup(&rd->cpupri); 5427 cpudl_cleanup(&rd->cpudl); 5428 free_cpumask_var(rd->dlo_mask); 5429 free_cpumask_var(rd->rto_mask); 5430 free_cpumask_var(rd->online); 5431 free_cpumask_var(rd->span); 5432 kfree(rd); 5433} 5434 5435static void rq_attach_root(struct rq *rq, struct root_domain *rd) 5436{ 5437 struct root_domain *old_rd = NULL; 5438 unsigned long flags; 5439 5440 raw_spin_lock_irqsave(&rq->lock, flags); 5441 5442 if (rq->rd) { 5443 old_rd = rq->rd; 5444 5445 if (cpumask_test_cpu(rq->cpu, old_rd->online)) 5446 set_rq_offline(rq); 5447 5448 cpumask_clear_cpu(rq->cpu, old_rd->span); 5449 5450 /* 5451 * If we dont want to free the old_rd yet then 5452 * set old_rd to NULL to skip the freeing later 5453 * in this function: 5454 */ 5455 if (!atomic_dec_and_test(&old_rd->refcount)) 5456 old_rd = NULL; 5457 } 5458 5459 atomic_inc(&rd->refcount); 5460 rq->rd = rd; 5461 5462 cpumask_set_cpu(rq->cpu, rd->span); 5463 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) 5464 set_rq_online(rq); 5465 5466 raw_spin_unlock_irqrestore(&rq->lock, flags); 5467 5468 if (old_rd) 5469 call_rcu_sched(&old_rd->rcu, free_rootdomain); 5470} 5471 5472static int init_rootdomain(struct root_domain *rd) 5473{ 5474 memset(rd, 0, sizeof(*rd)); 5475 5476 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) 5477 goto out; 5478 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) 5479 goto free_span; 5480 if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) 5481 goto free_online; 5482 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) 5483 goto free_dlo_mask; 5484 5485 init_dl_bw(&rd->dl_bw); 5486 if (cpudl_init(&rd->cpudl) != 0) 5487 goto free_dlo_mask; 5488 5489 if (cpupri_init(&rd->cpupri) != 0) 5490 goto free_rto_mask; 5491 return 0; 5492 5493free_rto_mask: 5494 free_cpumask_var(rd->rto_mask); 5495free_dlo_mask: 5496 free_cpumask_var(rd->dlo_mask); 5497free_online: 5498 free_cpumask_var(rd->online); 5499free_span: 5500 free_cpumask_var(rd->span); 5501out: 5502 return -ENOMEM; 5503} 5504 5505/* 5506 * By default the system creates a single root-domain with all cpus as 5507 * members (mimicking the global state we have today). 5508 */ 5509struct root_domain def_root_domain; 5510 5511static void init_defrootdomain(void) 5512{ 5513 init_rootdomain(&def_root_domain); 5514 5515 atomic_set(&def_root_domain.refcount, 1); 5516} 5517 5518static struct root_domain *alloc_rootdomain(void) 5519{ 5520 struct root_domain *rd; 5521 5522 rd = kmalloc(sizeof(*rd), GFP_KERNEL); 5523 if (!rd) 5524 return NULL; 5525 5526 if (init_rootdomain(rd) != 0) { 5527 kfree(rd); 5528 return NULL; 5529 } 5530 5531 return rd; 5532} 5533 5534static void free_sched_groups(struct sched_group *sg, int free_sgc) 5535{ 5536 struct sched_group *tmp, *first; 5537 5538 if (!sg) 5539 return; 5540 5541 first = sg; 5542 do { 5543 tmp = sg->next; 5544 5545 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) 5546 kfree(sg->sgc); 5547 5548 kfree(sg); 5549 sg = tmp; 5550 } while (sg != first); 5551} 5552 5553static void free_sched_domain(struct rcu_head *rcu) 5554{ 5555 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); 5556 5557 /* 5558 * If its an overlapping domain it has private groups, iterate and 5559 * nuke them all. 5560 */ 5561 if (sd->flags & SD_OVERLAP) { 5562 free_sched_groups(sd->groups, 1); 5563 } else if (atomic_dec_and_test(&sd->groups->ref)) { 5564 kfree(sd->groups->sgc); 5565 kfree(sd->groups); 5566 } 5567 kfree(sd); 5568} 5569 5570static void destroy_sched_domain(struct sched_domain *sd, int cpu) 5571{ 5572 call_rcu(&sd->rcu, free_sched_domain); 5573} 5574 5575static void destroy_sched_domains(struct sched_domain *sd, int cpu) 5576{ 5577 for (; sd; sd = sd->parent) 5578 destroy_sched_domain(sd, cpu); 5579} 5580 5581/* 5582 * Keep a special pointer to the highest sched_domain that has 5583 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this 5584 * allows us to avoid some pointer chasing select_idle_sibling(). 5585 * 5586 * Also keep a unique ID per domain (we use the first cpu number in 5587 * the cpumask of the domain), this allows us to quickly tell if 5588 * two cpus are in the same cache domain, see cpus_share_cache(). 5589 */ 5590DEFINE_PER_CPU(struct sched_domain *, sd_llc); 5591DEFINE_PER_CPU(int, sd_llc_size); 5592DEFINE_PER_CPU(int, sd_llc_id); 5593DEFINE_PER_CPU(struct sched_domain *, sd_numa); 5594DEFINE_PER_CPU(struct sched_domain *, sd_busy); 5595DEFINE_PER_CPU(struct sched_domain *, sd_asym); 5596 5597static void update_top_cache_domain(int cpu) 5598{ 5599 struct sched_domain *sd; 5600 struct sched_domain *busy_sd = NULL; 5601 int id = cpu; 5602 int size = 1; 5603 5604 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); 5605 if (sd) { 5606 id = cpumask_first(sched_domain_span(sd)); 5607 size = cpumask_weight(sched_domain_span(sd)); 5608 busy_sd = sd->parent; /* sd_busy */ 5609 } 5610 rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd); 5611 5612 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); 5613 per_cpu(sd_llc_size, cpu) = size; 5614 per_cpu(sd_llc_id, cpu) = id; 5615 5616 sd = lowest_flag_domain(cpu, SD_NUMA); 5617 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); 5618 5619 sd = highest_flag_domain(cpu, SD_ASYM_PACKING); 5620 rcu_assign_pointer(per_cpu(sd_asym, cpu), sd); 5621} 5622 5623/* 5624 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must 5625 * hold the hotplug lock. 5626 */ 5627static void 5628cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) 5629{ 5630 struct rq *rq = cpu_rq(cpu); 5631 struct sched_domain *tmp; 5632 5633 /* Remove the sched domains which do not contribute to scheduling. */ 5634 for (tmp = sd; tmp; ) { 5635 struct sched_domain *parent = tmp->parent; 5636 if (!parent) 5637 break; 5638 5639 if (sd_parent_degenerate(tmp, parent)) { 5640 tmp->parent = parent->parent; 5641 if (parent->parent) 5642 parent->parent->child = tmp; 5643 /* 5644 * Transfer SD_PREFER_SIBLING down in case of a 5645 * degenerate parent; the spans match for this 5646 * so the property transfers. 5647 */ 5648 if (parent->flags & SD_PREFER_SIBLING) 5649 tmp->flags |= SD_PREFER_SIBLING; 5650 destroy_sched_domain(parent, cpu); 5651 } else 5652 tmp = tmp->parent; 5653 } 5654 5655 if (sd && sd_degenerate(sd)) { 5656 tmp = sd; 5657 sd = sd->parent; 5658 destroy_sched_domain(tmp, cpu); 5659 if (sd) 5660 sd->child = NULL; 5661 } 5662 5663 sched_domain_debug(sd, cpu); 5664 5665 rq_attach_root(rq, rd); 5666 tmp = rq->sd; 5667 rcu_assign_pointer(rq->sd, sd); 5668 destroy_sched_domains(tmp, cpu); 5669 5670 update_top_cache_domain(cpu); 5671} 5672 5673/* cpus with isolated domains */ 5674static cpumask_var_t cpu_isolated_map; 5675 5676/* Setup the mask of cpus configured for isolated domains */ 5677static int __init isolated_cpu_setup(char *str) 5678{ 5679 alloc_bootmem_cpumask_var(&cpu_isolated_map); 5680 cpulist_parse(str, cpu_isolated_map); 5681 return 1; 5682} 5683 5684__setup("isolcpus=", isolated_cpu_setup); 5685 5686struct s_data { 5687 struct sched_domain ** __percpu sd; 5688 struct root_domain *rd; 5689}; 5690 5691enum s_alloc { 5692 sa_rootdomain, 5693 sa_sd, 5694 sa_sd_storage, 5695 sa_none, 5696}; 5697 5698/* 5699 * Build an iteration mask that can exclude certain CPUs from the upwards 5700 * domain traversal. 5701 * 5702 * Asymmetric node setups can result in situations where the domain tree is of 5703 * unequal depth, make sure to skip domains that already cover the entire 5704 * range. 5705 * 5706 * In that case build_sched_domains() will have terminated the iteration early 5707 * and our sibling sd spans will be empty. Domains should always include the 5708 * cpu they're built on, so check that. 5709 * 5710 */ 5711static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) 5712{ 5713 const struct cpumask *span = sched_domain_span(sd); 5714 struct sd_data *sdd = sd->private; 5715 struct sched_domain *sibling; 5716 int i; 5717 5718 for_each_cpu(i, span) { 5719 sibling = *per_cpu_ptr(sdd->sd, i); 5720 if (!cpumask_test_cpu(i, sched_domain_span(sibling))) 5721 continue; 5722 5723 cpumask_set_cpu(i, sched_group_mask(sg)); 5724 } 5725} 5726 5727/* 5728 * Return the canonical balance cpu for this group, this is the first cpu 5729 * of this group that's also in the iteration mask. 5730 */ 5731int group_balance_cpu(struct sched_group *sg) 5732{ 5733 return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg)); 5734} 5735 5736static int 5737build_overlap_sched_groups(struct sched_domain *sd, int cpu) 5738{ 5739 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg; 5740 const struct cpumask *span = sched_domain_span(sd); 5741 struct cpumask *covered = sched_domains_tmpmask; 5742 struct sd_data *sdd = sd->private; 5743 struct sched_domain *child; 5744 int i; 5745 5746 cpumask_clear(covered); 5747 5748 for_each_cpu(i, span) { 5749 struct cpumask *sg_span; 5750 5751 if (cpumask_test_cpu(i, covered)) 5752 continue; 5753 5754 child = *per_cpu_ptr(sdd->sd, i); 5755 5756 /* See the comment near build_group_mask(). */ 5757 if (!cpumask_test_cpu(i, sched_domain_span(child))) 5758 continue; 5759 5760 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 5761 GFP_KERNEL, cpu_to_node(cpu)); 5762 5763 if (!sg) 5764 goto fail; 5765 5766 sg_span = sched_group_cpus(sg); 5767 if (child->child) { 5768 child = child->child; 5769 cpumask_copy(sg_span, sched_domain_span(child)); 5770 } else 5771 cpumask_set_cpu(i, sg_span); 5772 5773 cpumask_or(covered, covered, sg_span); 5774 5775 sg->sgc = *per_cpu_ptr(sdd->sgc, i); 5776 if (atomic_inc_return(&sg->sgc->ref) == 1) 5777 build_group_mask(sd, sg); 5778 5779 /* 5780 * Initialize sgc->capacity such that even if we mess up the 5781 * domains and no possible iteration will get us here, we won't 5782 * die on a /0 trap. 5783 */ 5784 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); 5785 sg->sgc->capacity_orig = sg->sgc->capacity; 5786 5787 /* 5788 * Make sure the first group of this domain contains the 5789 * canonical balance cpu. Otherwise the sched_domain iteration 5790 * breaks. See update_sg_lb_stats(). 5791 */ 5792 if ((!groups && cpumask_test_cpu(cpu, sg_span)) || 5793 group_balance_cpu(sg) == cpu) 5794 groups = sg; 5795 5796 if (!first) 5797 first = sg; 5798 if (last) 5799 last->next = sg; 5800 last = sg; 5801 last->next = first; 5802 } 5803 sd->groups = groups; 5804 5805 return 0; 5806 5807fail: 5808 free_sched_groups(first, 0); 5809 5810 return -ENOMEM; 5811} 5812 5813static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) 5814{ 5815 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 5816 struct sched_domain *child = sd->child; 5817 5818 if (child) 5819 cpu = cpumask_first(sched_domain_span(child)); 5820 5821 if (sg) { 5822 *sg = *per_cpu_ptr(sdd->sg, cpu); 5823 (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu); 5824 atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */ 5825 } 5826 5827 return cpu; 5828} 5829 5830/* 5831 * build_sched_groups will build a circular linked list of the groups 5832 * covered by the given span, and will set each group's ->cpumask correctly, 5833 * and ->cpu_capacity to 0. 5834 * 5835 * Assumes the sched_domain tree is fully constructed 5836 */ 5837static int 5838build_sched_groups(struct sched_domain *sd, int cpu) 5839{ 5840 struct sched_group *first = NULL, *last = NULL; 5841 struct sd_data *sdd = sd->private; 5842 const struct cpumask *span = sched_domain_span(sd); 5843 struct cpumask *covered; 5844 int i; 5845 5846 get_group(cpu, sdd, &sd->groups); 5847 atomic_inc(&sd->groups->ref); 5848 5849 if (cpu != cpumask_first(span)) 5850 return 0; 5851 5852 lockdep_assert_held(&sched_domains_mutex); 5853 covered = sched_domains_tmpmask; 5854 5855 cpumask_clear(covered); 5856 5857 for_each_cpu(i, span) { 5858 struct sched_group *sg; 5859 int group, j; 5860 5861 if (cpumask_test_cpu(i, covered)) 5862 continue; 5863 5864 group = get_group(i, sdd, &sg); 5865 cpumask_setall(sched_group_mask(sg)); 5866 5867 for_each_cpu(j, span) { 5868 if (get_group(j, sdd, NULL) != group) 5869 continue; 5870 5871 cpumask_set_cpu(j, covered); 5872 cpumask_set_cpu(j, sched_group_cpus(sg)); 5873 } 5874 5875 if (!first) 5876 first = sg; 5877 if (last) 5878 last->next = sg; 5879 last = sg; 5880 } 5881 last->next = first; 5882 5883 return 0; 5884} 5885 5886/* 5887 * Initialize sched groups cpu_capacity. 5888 * 5889 * cpu_capacity indicates the capacity of sched group, which is used while 5890 * distributing the load between different sched groups in a sched domain. 5891 * Typically cpu_capacity for all the groups in a sched domain will be same 5892 * unless there are asymmetries in the topology. If there are asymmetries, 5893 * group having more cpu_capacity will pickup more load compared to the 5894 * group having less cpu_capacity. 5895 */ 5896static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) 5897{ 5898 struct sched_group *sg = sd->groups; 5899 5900 WARN_ON(!sg); 5901 5902 do { 5903 sg->group_weight = cpumask_weight(sched_group_cpus(sg)); 5904 sg = sg->next; 5905 } while (sg != sd->groups); 5906 5907 if (cpu != group_balance_cpu(sg)) 5908 return; 5909 5910 update_group_capacity(sd, cpu); 5911 atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight); 5912} 5913 5914/* 5915 * Initializers for schedule domains 5916 * Non-inlined to reduce accumulated stack pressure in build_sched_domains() 5917 */ 5918 5919static int default_relax_domain_level = -1; 5920int sched_domain_level_max; 5921 5922static int __init setup_relax_domain_level(char *str) 5923{ 5924 if (kstrtoint(str, 0, &default_relax_domain_level)) 5925 pr_warn("Unable to set relax_domain_level\n"); 5926 5927 return 1; 5928} 5929__setup("relax_domain_level=", setup_relax_domain_level); 5930 5931static void set_domain_attribute(struct sched_domain *sd, 5932 struct sched_domain_attr *attr) 5933{ 5934 int request; 5935 5936 if (!attr || attr->relax_domain_level < 0) { 5937 if (default_relax_domain_level < 0) 5938 return; 5939 else 5940 request = default_relax_domain_level; 5941 } else 5942 request = attr->relax_domain_level; 5943 if (request < sd->level) { 5944 /* turn off idle balance on this domain */ 5945 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 5946 } else { 5947 /* turn on idle balance on this domain */ 5948 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 5949 } 5950} 5951 5952static void __sdt_free(const struct cpumask *cpu_map); 5953static int __sdt_alloc(const struct cpumask *cpu_map); 5954 5955static void __free_domain_allocs(struct s_data *d, enum s_alloc what, 5956 const struct cpumask *cpu_map) 5957{ 5958 switch (what) { 5959 case sa_rootdomain: 5960 if (!atomic_read(&d->rd->refcount)) 5961 free_rootdomain(&d->rd->rcu); /* fall through */ 5962 case sa_sd: 5963 free_percpu(d->sd); /* fall through */ 5964 case sa_sd_storage: 5965 __sdt_free(cpu_map); /* fall through */ 5966 case sa_none: 5967 break; 5968 } 5969} 5970 5971static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, 5972 const struct cpumask *cpu_map) 5973{ 5974 memset(d, 0, sizeof(*d)); 5975 5976 if (__sdt_alloc(cpu_map)) 5977 return sa_sd_storage; 5978 d->sd = alloc_percpu(struct sched_domain *); 5979 if (!d->sd) 5980 return sa_sd_storage; 5981 d->rd = alloc_rootdomain(); 5982 if (!d->rd) 5983 return sa_sd; 5984 return sa_rootdomain; 5985} 5986 5987/* 5988 * NULL the sd_data elements we've used to build the sched_domain and 5989 * sched_group structure so that the subsequent __free_domain_allocs() 5990 * will not free the data we're using. 5991 */ 5992static void claim_allocations(int cpu, struct sched_domain *sd) 5993{ 5994 struct sd_data *sdd = sd->private; 5995 5996 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); 5997 *per_cpu_ptr(sdd->sd, cpu) = NULL; 5998 5999 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) 6000 *per_cpu_ptr(sdd->sg, cpu) = NULL; 6001 6002 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) 6003 *per_cpu_ptr(sdd->sgc, cpu) = NULL; 6004} 6005 6006#ifdef CONFIG_NUMA 6007static int sched_domains_numa_levels; 6008static int *sched_domains_numa_distance; 6009static struct cpumask ***sched_domains_numa_masks; 6010static int sched_domains_curr_level; 6011#endif 6012 6013/* 6014 * SD_flags allowed in topology descriptions. 6015 * 6016 * SD_SHARE_CPUCAPACITY - describes SMT topologies 6017 * SD_SHARE_PKG_RESOURCES - describes shared caches 6018 * SD_NUMA - describes NUMA topologies 6019 * SD_SHARE_POWERDOMAIN - describes shared power domain 6020 * 6021 * Odd one out: 6022 * SD_ASYM_PACKING - describes SMT quirks 6023 */ 6024#define TOPOLOGY_SD_FLAGS \ 6025 (SD_SHARE_CPUCAPACITY | \ 6026 SD_SHARE_PKG_RESOURCES | \ 6027 SD_NUMA | \ 6028 SD_ASYM_PACKING | \ 6029 SD_SHARE_POWERDOMAIN) 6030 6031static struct sched_domain * 6032sd_init(struct sched_domain_topology_level *tl, int cpu) 6033{ 6034 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); 6035 int sd_weight, sd_flags = 0; 6036 6037#ifdef CONFIG_NUMA 6038 /* 6039 * Ugly hack to pass state to sd_numa_mask()... 6040 */ 6041 sched_domains_curr_level = tl->numa_level; 6042#endif 6043 6044 sd_weight = cpumask_weight(tl->mask(cpu)); 6045 6046 if (tl->sd_flags) 6047 sd_flags = (*tl->sd_flags)(); 6048 if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, 6049 "wrong sd_flags in topology description\n")) 6050 sd_flags &= ~TOPOLOGY_SD_FLAGS; 6051 6052 *sd = (struct sched_domain){ 6053 .min_interval = sd_weight, 6054 .max_interval = 2*sd_weight, 6055 .busy_factor = 32, 6056 .imbalance_pct = 125, 6057 6058 .cache_nice_tries = 0, 6059 .busy_idx = 0, 6060 .idle_idx = 0, 6061 .newidle_idx = 0, 6062 .wake_idx = 0, 6063 .forkexec_idx = 0, 6064 6065 .flags = 1*SD_LOAD_BALANCE 6066 | 1*SD_BALANCE_NEWIDLE 6067 | 1*SD_BALANCE_EXEC 6068 | 1*SD_BALANCE_FORK 6069 | 0*SD_BALANCE_WAKE 6070 | 1*SD_WAKE_AFFINE 6071 | 0*SD_SHARE_CPUCAPACITY 6072 | 0*SD_SHARE_PKG_RESOURCES 6073 | 0*SD_SERIALIZE 6074 | 0*SD_PREFER_SIBLING 6075 | 0*SD_NUMA 6076 | sd_flags 6077 , 6078 6079 .last_balance = jiffies, 6080 .balance_interval = sd_weight, 6081 .smt_gain = 0, 6082 .max_newidle_lb_cost = 0, 6083 .next_decay_max_lb_cost = jiffies, 6084#ifdef CONFIG_SCHED_DEBUG 6085 .name = tl->name, 6086#endif 6087 }; 6088 6089 /* 6090 * Convert topological properties into behaviour. 6091 */ 6092 6093 if (sd->flags & SD_SHARE_CPUCAPACITY) { 6094 sd->imbalance_pct = 110; 6095 sd->smt_gain = 1178; /* ~15% */ 6096 6097 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { 6098 sd->imbalance_pct = 117; 6099 sd->cache_nice_tries = 1; 6100 sd->busy_idx = 2; 6101 6102#ifdef CONFIG_NUMA 6103 } else if (sd->flags & SD_NUMA) { 6104 sd->cache_nice_tries = 2; 6105 sd->busy_idx = 3; 6106 sd->idle_idx = 2; 6107 6108 sd->flags |= SD_SERIALIZE; 6109 if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) { 6110 sd->flags &= ~(SD_BALANCE_EXEC | 6111 SD_BALANCE_FORK | 6112 SD_WAKE_AFFINE); 6113 } 6114 6115#endif 6116 } else { 6117 sd->flags |= SD_PREFER_SIBLING; 6118 sd->cache_nice_tries = 1; 6119 sd->busy_idx = 2; 6120 sd->idle_idx = 1; 6121 } 6122 6123 sd->private = &tl->data; 6124 6125 return sd; 6126} 6127 6128/* 6129 * Topology list, bottom-up. 6130 */ 6131static struct sched_domain_topology_level default_topology[] = { 6132#ifdef CONFIG_SCHED_SMT 6133 { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, 6134#endif 6135#ifdef CONFIG_SCHED_MC 6136 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, 6137#endif 6138 { cpu_cpu_mask, SD_INIT_NAME(DIE) }, 6139 { NULL, }, 6140}; 6141 6142struct sched_domain_topology_level *sched_domain_topology = default_topology; 6143 6144#define for_each_sd_topology(tl) \ 6145 for (tl = sched_domain_topology; tl->mask; tl++) 6146 6147void set_sched_topology(struct sched_domain_topology_level *tl) 6148{ 6149 sched_domain_topology = tl; 6150} 6151 6152#ifdef CONFIG_NUMA 6153 6154static const struct cpumask *sd_numa_mask(int cpu) 6155{ 6156 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; 6157} 6158 6159static void sched_numa_warn(const char *str) 6160{ 6161 static int done = false; 6162 int i,j; 6163 6164 if (done) 6165 return; 6166 6167 done = true; 6168 6169 printk(KERN_WARNING "ERROR: %s\n\n", str); 6170 6171 for (i = 0; i < nr_node_ids; i++) { 6172 printk(KERN_WARNING " "); 6173 for (j = 0; j < nr_node_ids; j++) 6174 printk(KERN_CONT "%02d ", node_distance(i,j)); 6175 printk(KERN_CONT "\n"); 6176 } 6177 printk(KERN_WARNING "\n"); 6178} 6179 6180static bool find_numa_distance(int distance) 6181{ 6182 int i; 6183 6184 if (distance == node_distance(0, 0)) 6185 return true; 6186 6187 for (i = 0; i < sched_domains_numa_levels; i++) { 6188 if (sched_domains_numa_distance[i] == distance) 6189 return true; 6190 } 6191 6192 return false; 6193} 6194 6195static void sched_init_numa(void) 6196{ 6197 int next_distance, curr_distance = node_distance(0, 0); 6198 struct sched_domain_topology_level *tl; 6199 int level = 0; 6200 int i, j, k; 6201 6202 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); 6203 if (!sched_domains_numa_distance) 6204 return; 6205 6206 /* 6207 * O(nr_nodes^2) deduplicating selection sort -- in order to find the 6208 * unique distances in the node_distance() table. 6209 * 6210 * Assumes node_distance(0,j) includes all distances in 6211 * node_distance(i,j) in order to avoid cubic time. 6212 */ 6213 next_distance = curr_distance; 6214 for (i = 0; i < nr_node_ids; i++) { 6215 for (j = 0; j < nr_node_ids; j++) { 6216 for (k = 0; k < nr_node_ids; k++) { 6217 int distance = node_distance(i, k); 6218 6219 if (distance > curr_distance && 6220 (distance < next_distance || 6221 next_distance == curr_distance)) 6222 next_distance = distance; 6223 6224 /* 6225 * While not a strong assumption it would be nice to know 6226 * about cases where if node A is connected to B, B is not 6227 * equally connected to A. 6228 */ 6229 if (sched_debug() && node_distance(k, i) != distance) 6230 sched_numa_warn("Node-distance not symmetric"); 6231 6232 if (sched_debug() && i && !find_numa_distance(distance)) 6233 sched_numa_warn("Node-0 not representative"); 6234 } 6235 if (next_distance != curr_distance) { 6236 sched_domains_numa_distance[level++] = next_distance; 6237 sched_domains_numa_levels = level; 6238 curr_distance = next_distance; 6239 } else break; 6240 } 6241 6242 /* 6243 * In case of sched_debug() we verify the above assumption. 6244 */ 6245 if (!sched_debug()) 6246 break; 6247 } 6248 /* 6249 * 'level' contains the number of unique distances, excluding the 6250 * identity distance node_distance(i,i). 6251 * 6252 * The sched_domains_numa_distance[] array includes the actual distance 6253 * numbers. 6254 */ 6255 6256 /* 6257 * Here, we should temporarily reset sched_domains_numa_levels to 0. 6258 * If it fails to allocate memory for array sched_domains_numa_masks[][], 6259 * the array will contain less then 'level' members. This could be 6260 * dangerous when we use it to iterate array sched_domains_numa_masks[][] 6261 * in other functions. 6262 * 6263 * We reset it to 'level' at the end of this function. 6264 */ 6265 sched_domains_numa_levels = 0; 6266 6267 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL); 6268 if (!sched_domains_numa_masks) 6269 return; 6270 6271 /* 6272 * Now for each level, construct a mask per node which contains all 6273 * cpus of nodes that are that many hops away from us. 6274 */ 6275 for (i = 0; i < level; i++) { 6276 sched_domains_numa_masks[i] = 6277 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); 6278 if (!sched_domains_numa_masks[i]) 6279 return; 6280 6281 for (j = 0; j < nr_node_ids; j++) { 6282 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); 6283 if (!mask) 6284 return; 6285 6286 sched_domains_numa_masks[i][j] = mask; 6287 6288 for (k = 0; k < nr_node_ids; k++) { 6289 if (node_distance(j, k) > sched_domains_numa_distance[i]) 6290 continue; 6291 6292 cpumask_or(mask, mask, cpumask_of_node(k)); 6293 } 6294 } 6295 } 6296 6297 /* Compute default topology size */ 6298 for (i = 0; sched_domain_topology[i].mask; i++); 6299 6300 tl = kzalloc((i + level + 1) * 6301 sizeof(struct sched_domain_topology_level), GFP_KERNEL); 6302 if (!tl) 6303 return; 6304 6305 /* 6306 * Copy the default topology bits.. 6307 */ 6308 for (i = 0; sched_domain_topology[i].mask; i++) 6309 tl[i] = sched_domain_topology[i]; 6310 6311 /* 6312 * .. and append 'j' levels of NUMA goodness. 6313 */ 6314 for (j = 0; j < level; i++, j++) { 6315 tl[i] = (struct sched_domain_topology_level){ 6316 .mask = sd_numa_mask, 6317 .sd_flags = cpu_numa_flags, 6318 .flags = SDTL_OVERLAP, 6319 .numa_level = j, 6320 SD_INIT_NAME(NUMA) 6321 }; 6322 } 6323 6324 sched_domain_topology = tl; 6325 6326 sched_domains_numa_levels = level; 6327} 6328 6329static void sched_domains_numa_masks_set(int cpu) 6330{ 6331 int i, j; 6332 int node = cpu_to_node(cpu); 6333 6334 for (i = 0; i < sched_domains_numa_levels; i++) { 6335 for (j = 0; j < nr_node_ids; j++) { 6336 if (node_distance(j, node) <= sched_domains_numa_distance[i]) 6337 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); 6338 } 6339 } 6340} 6341 6342static void sched_domains_numa_masks_clear(int cpu) 6343{ 6344 int i, j; 6345 for (i = 0; i < sched_domains_numa_levels; i++) { 6346 for (j = 0; j < nr_node_ids; j++) 6347 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); 6348 } 6349} 6350 6351/* 6352 * Update sched_domains_numa_masks[level][node] array when new cpus 6353 * are onlined. 6354 */ 6355static int sched_domains_numa_masks_update(struct notifier_block *nfb, 6356 unsigned long action, 6357 void *hcpu) 6358{ 6359 int cpu = (long)hcpu; 6360 6361 switch (action & ~CPU_TASKS_FROZEN) { 6362 case CPU_ONLINE: 6363 sched_domains_numa_masks_set(cpu); 6364 break; 6365 6366 case CPU_DEAD: 6367 sched_domains_numa_masks_clear(cpu); 6368 break; 6369 6370 default: 6371 return NOTIFY_DONE; 6372 } 6373 6374 return NOTIFY_OK; 6375} 6376#else 6377static inline void sched_init_numa(void) 6378{ 6379} 6380 6381static int sched_domains_numa_masks_update(struct notifier_block *nfb, 6382 unsigned long action, 6383 void *hcpu) 6384{ 6385 return 0; 6386} 6387#endif /* CONFIG_NUMA */ 6388 6389static int __sdt_alloc(const struct cpumask *cpu_map) 6390{ 6391 struct sched_domain_topology_level *tl; 6392 int j; 6393 6394 for_each_sd_topology(tl) { 6395 struct sd_data *sdd = &tl->data; 6396 6397 sdd->sd = alloc_percpu(struct sched_domain *); 6398 if (!sdd->sd) 6399 return -ENOMEM; 6400 6401 sdd->sg = alloc_percpu(struct sched_group *); 6402 if (!sdd->sg) 6403 return -ENOMEM; 6404 6405 sdd->sgc = alloc_percpu(struct sched_group_capacity *); 6406 if (!sdd->sgc) 6407 return -ENOMEM; 6408 6409 for_each_cpu(j, cpu_map) { 6410 struct sched_domain *sd; 6411 struct sched_group *sg; 6412 struct sched_group_capacity *sgc; 6413 6414 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), 6415 GFP_KERNEL, cpu_to_node(j)); 6416 if (!sd) 6417 return -ENOMEM; 6418 6419 *per_cpu_ptr(sdd->sd, j) = sd; 6420 6421 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 6422 GFP_KERNEL, cpu_to_node(j)); 6423 if (!sg) 6424 return -ENOMEM; 6425 6426 sg->next = sg; 6427 6428 *per_cpu_ptr(sdd->sg, j) = sg; 6429 6430 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), 6431 GFP_KERNEL, cpu_to_node(j)); 6432 if (!sgc) 6433 return -ENOMEM; 6434 6435 *per_cpu_ptr(sdd->sgc, j) = sgc; 6436 } 6437 } 6438 6439 return 0; 6440} 6441 6442static void __sdt_free(const struct cpumask *cpu_map) 6443{ 6444 struct sched_domain_topology_level *tl; 6445 int j; 6446 6447 for_each_sd_topology(tl) { 6448 struct sd_data *sdd = &tl->data; 6449 6450 for_each_cpu(j, cpu_map) { 6451 struct sched_domain *sd; 6452 6453 if (sdd->sd) { 6454 sd = *per_cpu_ptr(sdd->sd, j); 6455 if (sd && (sd->flags & SD_OVERLAP)) 6456 free_sched_groups(sd->groups, 0); 6457 kfree(*per_cpu_ptr(sdd->sd, j)); 6458 } 6459 6460 if (sdd->sg) 6461 kfree(*per_cpu_ptr(sdd->sg, j)); 6462 if (sdd->sgc) 6463 kfree(*per_cpu_ptr(sdd->sgc, j)); 6464 } 6465 free_percpu(sdd->sd); 6466 sdd->sd = NULL; 6467 free_percpu(sdd->sg); 6468 sdd->sg = NULL; 6469 free_percpu(sdd->sgc); 6470 sdd->sgc = NULL; 6471 } 6472} 6473 6474struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, 6475 const struct cpumask *cpu_map, struct sched_domain_attr *attr, 6476 struct sched_domain *child, int cpu) 6477{ 6478 struct sched_domain *sd = sd_init(tl, cpu); 6479 if (!sd) 6480 return child; 6481 6482 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); 6483 if (child) { 6484 sd->level = child->level + 1; 6485 sched_domain_level_max = max(sched_domain_level_max, sd->level); 6486 child->parent = sd; 6487 sd->child = child; 6488 } 6489 set_domain_attribute(sd, attr); 6490 6491 return sd; 6492} 6493 6494/* 6495 * Build sched domains for a given set of cpus and attach the sched domains 6496 * to the individual cpus 6497 */ 6498static int build_sched_domains(const struct cpumask *cpu_map, 6499 struct sched_domain_attr *attr) 6500{ 6501 enum s_alloc alloc_state; 6502 struct sched_domain *sd; 6503 struct s_data d; 6504 int i, ret = -ENOMEM; 6505 6506 alloc_state = __visit_domain_allocation_hell(&d, cpu_map); 6507 if (alloc_state != sa_rootdomain) 6508 goto error; 6509 6510 /* Set up domains for cpus specified by the cpu_map. */ 6511 for_each_cpu(i, cpu_map) { 6512 struct sched_domain_topology_level *tl; 6513 6514 sd = NULL; 6515 for_each_sd_topology(tl) { 6516 sd = build_sched_domain(tl, cpu_map, attr, sd, i); 6517 if (tl == sched_domain_topology) 6518 *per_cpu_ptr(d.sd, i) = sd; 6519 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP)) 6520 sd->flags |= SD_OVERLAP; 6521 if (cpumask_equal(cpu_map, sched_domain_span(sd))) 6522 break; 6523 } 6524 } 6525 6526 /* Build the groups for the domains */ 6527 for_each_cpu(i, cpu_map) { 6528 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 6529 sd->span_weight = cpumask_weight(sched_domain_span(sd)); 6530 if (sd->flags & SD_OVERLAP) { 6531 if (build_overlap_sched_groups(sd, i)) 6532 goto error; 6533 } else { 6534 if (build_sched_groups(sd, i)) 6535 goto error; 6536 } 6537 } 6538 } 6539 6540 /* Calculate CPU capacity for physical packages and nodes */ 6541 for (i = nr_cpumask_bits-1; i >= 0; i--) { 6542 if (!cpumask_test_cpu(i, cpu_map)) 6543 continue; 6544 6545 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 6546 claim_allocations(i, sd); 6547 init_sched_groups_capacity(i, sd); 6548 } 6549 } 6550 6551 /* Attach the domains */ 6552 rcu_read_lock(); 6553 for_each_cpu(i, cpu_map) { 6554 sd = *per_cpu_ptr(d.sd, i); 6555 cpu_attach_domain(sd, d.rd, i); 6556 } 6557 rcu_read_unlock(); 6558 6559 ret = 0; 6560error: 6561 __free_domain_allocs(&d, alloc_state, cpu_map); 6562 return ret; 6563} 6564 6565static cpumask_var_t *doms_cur; /* current sched domains */ 6566static int ndoms_cur; /* number of sched domains in 'doms_cur' */ 6567static struct sched_domain_attr *dattr_cur; 6568 /* attribues of custom domains in 'doms_cur' */ 6569 6570/* 6571 * Special case: If a kmalloc of a doms_cur partition (array of 6572 * cpumask) fails, then fallback to a single sched domain, 6573 * as determined by the single cpumask fallback_doms. 6574 */ 6575static cpumask_var_t fallback_doms; 6576 6577/* 6578 * arch_update_cpu_topology lets virtualized architectures update the 6579 * cpu core maps. It is supposed to return 1 if the topology changed 6580 * or 0 if it stayed the same. 6581 */ 6582int __weak arch_update_cpu_topology(void) 6583{ 6584 return 0; 6585} 6586 6587cpumask_var_t *alloc_sched_domains(unsigned int ndoms) 6588{ 6589 int i; 6590 cpumask_var_t *doms; 6591 6592 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); 6593 if (!doms) 6594 return NULL; 6595 for (i = 0; i < ndoms; i++) { 6596 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { 6597 free_sched_domains(doms, i); 6598 return NULL; 6599 } 6600 } 6601 return doms; 6602} 6603 6604void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) 6605{ 6606 unsigned int i; 6607 for (i = 0; i < ndoms; i++) 6608 free_cpumask_var(doms[i]); 6609 kfree(doms); 6610} 6611 6612/* 6613 * Set up scheduler domains and groups. Callers must hold the hotplug lock. 6614 * For now this just excludes isolated cpus, but could be used to 6615 * exclude other special cases in the future. 6616 */ 6617static int init_sched_domains(const struct cpumask *cpu_map) 6618{ 6619 int err; 6620 6621 arch_update_cpu_topology(); 6622 ndoms_cur = 1; 6623 doms_cur = alloc_sched_domains(ndoms_cur); 6624 if (!doms_cur) 6625 doms_cur = &fallback_doms; 6626 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); 6627 err = build_sched_domains(doms_cur[0], NULL); 6628 register_sched_domain_sysctl(); 6629 6630 return err; 6631} 6632 6633/* 6634 * Detach sched domains from a group of cpus specified in cpu_map 6635 * These cpus will now be attached to the NULL domain 6636 */ 6637static void detach_destroy_domains(const struct cpumask *cpu_map) 6638{ 6639 int i; 6640 6641 rcu_read_lock(); 6642 for_each_cpu(i, cpu_map) 6643 cpu_attach_domain(NULL, &def_root_domain, i); 6644 rcu_read_unlock(); 6645} 6646 6647/* handle null as "default" */ 6648static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, 6649 struct sched_domain_attr *new, int idx_new) 6650{ 6651 struct sched_domain_attr tmp; 6652 6653 /* fast path */ 6654 if (!new && !cur) 6655 return 1; 6656 6657 tmp = SD_ATTR_INIT; 6658 return !memcmp(cur ? (cur + idx_cur) : &tmp, 6659 new ? (new + idx_new) : &tmp, 6660 sizeof(struct sched_domain_attr)); 6661} 6662 6663/* 6664 * Partition sched domains as specified by the 'ndoms_new' 6665 * cpumasks in the array doms_new[] of cpumasks. This compares 6666 * doms_new[] to the current sched domain partitioning, doms_cur[]. 6667 * It destroys each deleted domain and builds each new domain. 6668 * 6669 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. 6670 * The masks don't intersect (don't overlap.) We should setup one 6671 * sched domain for each mask. CPUs not in any of the cpumasks will 6672 * not be load balanced. If the same cpumask appears both in the 6673 * current 'doms_cur' domains and in the new 'doms_new', we can leave 6674 * it as it is. 6675 * 6676 * The passed in 'doms_new' should be allocated using 6677 * alloc_sched_domains. This routine takes ownership of it and will 6678 * free_sched_domains it when done with it. If the caller failed the 6679 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, 6680 * and partition_sched_domains() will fallback to the single partition 6681 * 'fallback_doms', it also forces the domains to be rebuilt. 6682 * 6683 * If doms_new == NULL it will be replaced with cpu_online_mask. 6684 * ndoms_new == 0 is a special case for destroying existing domains, 6685 * and it will not create the default domain. 6686 * 6687 * Call with hotplug lock held 6688 */ 6689void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 6690 struct sched_domain_attr *dattr_new) 6691{ 6692 int i, j, n; 6693 int new_topology; 6694 6695 mutex_lock(&sched_domains_mutex); 6696 6697 /* always unregister in case we don't destroy any domains */ 6698 unregister_sched_domain_sysctl(); 6699 6700 /* Let architecture update cpu core mappings. */ 6701 new_topology = arch_update_cpu_topology(); 6702 6703 n = doms_new ? ndoms_new : 0; 6704 6705 /* Destroy deleted domains */ 6706 for (i = 0; i < ndoms_cur; i++) { 6707 for (j = 0; j < n && !new_topology; j++) { 6708 if (cpumask_equal(doms_cur[i], doms_new[j]) 6709 && dattrs_equal(dattr_cur, i, dattr_new, j)) 6710 goto match1; 6711 } 6712 /* no match - a current sched domain not in new doms_new[] */ 6713 detach_destroy_domains(doms_cur[i]); 6714match1: 6715 ; 6716 } 6717 6718 n = ndoms_cur; 6719 if (doms_new == NULL) { 6720 n = 0; 6721 doms_new = &fallback_doms; 6722 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); 6723 WARN_ON_ONCE(dattr_new); 6724 } 6725 6726 /* Build new domains */ 6727 for (i = 0; i < ndoms_new; i++) { 6728 for (j = 0; j < n && !new_topology; j++) { 6729 if (cpumask_equal(doms_new[i], doms_cur[j]) 6730 && dattrs_equal(dattr_new, i, dattr_cur, j)) 6731 goto match2; 6732 } 6733 /* no match - add a new doms_new */ 6734 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); 6735match2: 6736 ; 6737 } 6738 6739 /* Remember the new sched domains */ 6740 if (doms_cur != &fallback_doms) 6741 free_sched_domains(doms_cur, ndoms_cur); 6742 kfree(dattr_cur); /* kfree(NULL) is safe */ 6743 doms_cur = doms_new; 6744 dattr_cur = dattr_new; 6745 ndoms_cur = ndoms_new; 6746 6747 register_sched_domain_sysctl(); 6748 6749 mutex_unlock(&sched_domains_mutex); 6750} 6751 6752static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */ 6753 6754/* 6755 * Update cpusets according to cpu_active mask. If cpusets are 6756 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 6757 * around partition_sched_domains(). 6758 * 6759 * If we come here as part of a suspend/resume, don't touch cpusets because we 6760 * want to restore it back to its original state upon resume anyway. 6761 */ 6762static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, 6763 void *hcpu) 6764{ 6765 switch (action) { 6766 case CPU_ONLINE_FROZEN: 6767 case CPU_DOWN_FAILED_FROZEN: 6768 6769 /* 6770 * num_cpus_frozen tracks how many CPUs are involved in suspend 6771 * resume sequence. As long as this is not the last online 6772 * operation in the resume sequence, just build a single sched 6773 * domain, ignoring cpusets. 6774 */ 6775 num_cpus_frozen--; 6776 if (likely(num_cpus_frozen)) { 6777 partition_sched_domains(1, NULL, NULL); 6778 break; 6779 } 6780 6781 /* 6782 * This is the last CPU online operation. So fall through and 6783 * restore the original sched domains by considering the 6784 * cpuset configurations. 6785 */ 6786 6787 case CPU_ONLINE: 6788 case CPU_DOWN_FAILED: 6789 cpuset_update_active_cpus(true); 6790 break; 6791 default: 6792 return NOTIFY_DONE; 6793 } 6794 return NOTIFY_OK; 6795} 6796 6797static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, 6798 void *hcpu) 6799{ 6800 switch (action) { 6801 case CPU_DOWN_PREPARE: 6802 cpuset_update_active_cpus(false); 6803 break; 6804 case CPU_DOWN_PREPARE_FROZEN: 6805 num_cpus_frozen++; 6806 partition_sched_domains(1, NULL, NULL); 6807 break; 6808 default: 6809 return NOTIFY_DONE; 6810 } 6811 return NOTIFY_OK; 6812} 6813 6814void __init sched_init_smp(void) 6815{ 6816 cpumask_var_t non_isolated_cpus; 6817 6818 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); 6819 alloc_cpumask_var(&fallback_doms, GFP_KERNEL); 6820 6821 sched_init_numa(); 6822 6823 /* 6824 * There's no userspace yet to cause hotplug operations; hence all the 6825 * cpu masks are stable and all blatant races in the below code cannot 6826 * happen. 6827 */ 6828 mutex_lock(&sched_domains_mutex); 6829 init_sched_domains(cpu_active_mask); 6830 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); 6831 if (cpumask_empty(non_isolated_cpus)) 6832 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); 6833 mutex_unlock(&sched_domains_mutex); 6834 6835 hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE); 6836 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); 6837 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); 6838 6839 init_hrtick(); 6840 6841 /* Move init over to a non-isolated CPU */ 6842 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) 6843 BUG(); 6844 sched_init_granularity(); 6845 free_cpumask_var(non_isolated_cpus); 6846 6847 init_sched_rt_class(); 6848 init_sched_dl_class(); 6849} 6850#else 6851void __init sched_init_smp(void) 6852{ 6853 sched_init_granularity(); 6854} 6855#endif /* CONFIG_SMP */ 6856 6857const_debug unsigned int sysctl_timer_migration = 1; 6858 6859int in_sched_functions(unsigned long addr) 6860{ 6861 return in_lock_functions(addr) || 6862 (addr >= (unsigned long)__sched_text_start 6863 && addr < (unsigned long)__sched_text_end); 6864} 6865 6866#ifdef CONFIG_CGROUP_SCHED 6867/* 6868 * Default task group. 6869 * Every task in system belongs to this group at bootup. 6870 */ 6871struct task_group root_task_group; 6872LIST_HEAD(task_groups); 6873#endif 6874 6875DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); 6876 6877void __init sched_init(void) 6878{ 6879 int i, j; 6880 unsigned long alloc_size = 0, ptr; 6881 6882#ifdef CONFIG_FAIR_GROUP_SCHED 6883 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 6884#endif 6885#ifdef CONFIG_RT_GROUP_SCHED 6886 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 6887#endif 6888#ifdef CONFIG_CPUMASK_OFFSTACK 6889 alloc_size += num_possible_cpus() * cpumask_size(); 6890#endif 6891 if (alloc_size) { 6892 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); 6893 6894#ifdef CONFIG_FAIR_GROUP_SCHED 6895 root_task_group.se = (struct sched_entity **)ptr; 6896 ptr += nr_cpu_ids * sizeof(void **); 6897 6898 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 6899 ptr += nr_cpu_ids * sizeof(void **); 6900 6901#endif /* CONFIG_FAIR_GROUP_SCHED */ 6902#ifdef CONFIG_RT_GROUP_SCHED 6903 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 6904 ptr += nr_cpu_ids * sizeof(void **); 6905 6906 root_task_group.rt_rq = (struct rt_rq **)ptr; 6907 ptr += nr_cpu_ids * sizeof(void **); 6908 6909#endif /* CONFIG_RT_GROUP_SCHED */ 6910#ifdef CONFIG_CPUMASK_OFFSTACK 6911 for_each_possible_cpu(i) { 6912 per_cpu(load_balance_mask, i) = (void *)ptr; 6913 ptr += cpumask_size(); 6914 } 6915#endif /* CONFIG_CPUMASK_OFFSTACK */ 6916 } 6917 6918 init_rt_bandwidth(&def_rt_bandwidth, 6919 global_rt_period(), global_rt_runtime()); 6920 init_dl_bandwidth(&def_dl_bandwidth, 6921 global_rt_period(), global_rt_runtime()); 6922 6923#ifdef CONFIG_SMP 6924 init_defrootdomain(); 6925#endif 6926 6927#ifdef CONFIG_RT_GROUP_SCHED 6928 init_rt_bandwidth(&root_task_group.rt_bandwidth, 6929 global_rt_period(), global_rt_runtime()); 6930#endif /* CONFIG_RT_GROUP_SCHED */ 6931 6932#ifdef CONFIG_CGROUP_SCHED 6933 list_add(&root_task_group.list, &task_groups); 6934 INIT_LIST_HEAD(&root_task_group.children); 6935 INIT_LIST_HEAD(&root_task_group.siblings); 6936 autogroup_init(&init_task); 6937 6938#endif /* CONFIG_CGROUP_SCHED */ 6939 6940 for_each_possible_cpu(i) { 6941 struct rq *rq; 6942 6943 rq = cpu_rq(i); 6944 raw_spin_lock_init(&rq->lock); 6945 rq->nr_running = 0; 6946 rq->calc_load_active = 0; 6947 rq->calc_load_update = jiffies + LOAD_FREQ; 6948 init_cfs_rq(&rq->cfs); 6949 init_rt_rq(&rq->rt, rq); 6950 init_dl_rq(&rq->dl, rq); 6951#ifdef CONFIG_FAIR_GROUP_SCHED 6952 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 6953 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 6954 /* 6955 * How much cpu bandwidth does root_task_group get? 6956 * 6957 * In case of task-groups formed thr' the cgroup filesystem, it 6958 * gets 100% of the cpu resources in the system. This overall 6959 * system cpu resource is divided among the tasks of 6960 * root_task_group and its child task-groups in a fair manner, 6961 * based on each entity's (task or task-group's) weight 6962 * (se->load.weight). 6963 * 6964 * In other words, if root_task_group has 10 tasks of weight 6965 * 1024) and two child groups A0 and A1 (of weight 1024 each), 6966 * then A0's share of the cpu resource is: 6967 * 6968 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 6969 * 6970 * We achieve this by letting root_task_group's tasks sit 6971 * directly in rq->cfs (i.e root_task_group->se[] = NULL). 6972 */ 6973 init_cfs_bandwidth(&root_task_group.cfs_bandwidth); 6974 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 6975#endif /* CONFIG_FAIR_GROUP_SCHED */ 6976 6977 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 6978#ifdef CONFIG_RT_GROUP_SCHED 6979 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 6980#endif 6981 6982 for (j = 0; j < CPU_LOAD_IDX_MAX; j++) 6983 rq->cpu_load[j] = 0; 6984 6985 rq->last_load_update_tick = jiffies; 6986 6987#ifdef CONFIG_SMP 6988 rq->sd = NULL; 6989 rq->rd = NULL; 6990 rq->cpu_capacity = SCHED_CAPACITY_SCALE; 6991 rq->post_schedule = 0; 6992 rq->active_balance = 0; 6993 rq->next_balance = jiffies; 6994 rq->push_cpu = 0; 6995 rq->cpu = i; 6996 rq->online = 0; 6997 rq->idle_stamp = 0; 6998 rq->avg_idle = 2*sysctl_sched_migration_cost; 6999 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 7000 7001 INIT_LIST_HEAD(&rq->cfs_tasks); 7002 7003 rq_attach_root(rq, &def_root_domain); 7004#ifdef CONFIG_NO_HZ_COMMON 7005 rq->nohz_flags = 0; 7006#endif 7007#ifdef CONFIG_NO_HZ_FULL 7008 rq->last_sched_tick = 0; 7009#endif 7010#endif 7011 init_rq_hrtick(rq); 7012 atomic_set(&rq->nr_iowait, 0); 7013 } 7014 7015 set_load_weight(&init_task); 7016 7017#ifdef CONFIG_PREEMPT_NOTIFIERS 7018 INIT_HLIST_HEAD(&init_task.preempt_notifiers); 7019#endif 7020 7021 /* 7022 * The boot idle thread does lazy MMU switching as well: 7023 */ 7024 atomic_inc(&init_mm.mm_count); 7025 enter_lazy_tlb(&init_mm, current); 7026 7027 /* 7028 * Make us the idle thread. Technically, schedule() should not be 7029 * called from this thread, however somewhere below it might be, 7030 * but because we are the idle thread, we just pick up running again 7031 * when this runqueue becomes "idle". 7032 */ 7033 init_idle(current, smp_processor_id()); 7034 7035 calc_load_update = jiffies + LOAD_FREQ; 7036 7037 /* 7038 * During early bootup we pretend to be a normal task: 7039 */ 7040 current->sched_class = &fair_sched_class; 7041 7042#ifdef CONFIG_SMP 7043 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); 7044 /* May be allocated at isolcpus cmdline parse time */ 7045 if (cpu_isolated_map == NULL) 7046 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); 7047 idle_thread_set_boot_cpu(); 7048 set_cpu_rq_start_time(); 7049#endif 7050 init_sched_fair_class(); 7051 7052 scheduler_running = 1; 7053} 7054 7055#ifdef CONFIG_DEBUG_ATOMIC_SLEEP 7056static inline int preempt_count_equals(int preempt_offset) 7057{ 7058 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); 7059 7060 return (nested == preempt_offset); 7061} 7062 7063void __might_sleep(const char *file, int line, int preempt_offset) 7064{ 7065 static unsigned long prev_jiffy; /* ratelimiting */ 7066 7067 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */ 7068 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && 7069 !is_idle_task(current)) || 7070 system_state != SYSTEM_RUNNING || oops_in_progress) 7071 return; 7072 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 7073 return; 7074 prev_jiffy = jiffies; 7075 7076 printk(KERN_ERR 7077 "BUG: sleeping function called from invalid context at %s:%d\n", 7078 file, line); 7079 printk(KERN_ERR 7080 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 7081 in_atomic(), irqs_disabled(), 7082 current->pid, current->comm); 7083 7084 debug_show_held_locks(current); 7085 if (irqs_disabled()) 7086 print_irqtrace_events(current); 7087#ifdef CONFIG_DEBUG_PREEMPT 7088 if (!preempt_count_equals(preempt_offset)) { 7089 pr_err("Preemption disabled at:"); 7090 print_ip_sym(current->preempt_disable_ip); 7091 pr_cont("\n"); 7092 } 7093#endif 7094 dump_stack(); 7095} 7096EXPORT_SYMBOL(__might_sleep); 7097#endif 7098 7099#ifdef CONFIG_MAGIC_SYSRQ 7100static void normalize_task(struct rq *rq, struct task_struct *p) 7101{ 7102 const struct sched_class *prev_class = p->sched_class; 7103 struct sched_attr attr = { 7104 .sched_policy = SCHED_NORMAL, 7105 }; 7106 int old_prio = p->prio; 7107 int on_rq; 7108 7109 on_rq = p->on_rq; 7110 if (on_rq) 7111 dequeue_task(rq, p, 0); 7112 __setscheduler(rq, p, &attr); 7113 if (on_rq) { 7114 enqueue_task(rq, p, 0); 7115 resched_curr(rq); 7116 } 7117 7118 check_class_changed(rq, p, prev_class, old_prio); 7119} 7120 7121void normalize_rt_tasks(void) 7122{ 7123 struct task_struct *g, *p; 7124 unsigned long flags; 7125 struct rq *rq; 7126 7127 read_lock_irqsave(&tasklist_lock, flags); 7128 do_each_thread(g, p) { 7129 /* 7130 * Only normalize user tasks: 7131 */ 7132 if (!p->mm) 7133 continue; 7134 7135 p->se.exec_start = 0; 7136#ifdef CONFIG_SCHEDSTATS 7137 p->se.statistics.wait_start = 0; 7138 p->se.statistics.sleep_start = 0; 7139 p->se.statistics.block_start = 0; 7140#endif 7141 7142 if (!dl_task(p) && !rt_task(p)) { 7143 /* 7144 * Renice negative nice level userspace 7145 * tasks back to 0: 7146 */ 7147 if (task_nice(p) < 0 && p->mm) 7148 set_user_nice(p, 0); 7149 continue; 7150 } 7151 7152 raw_spin_lock(&p->pi_lock); 7153 rq = __task_rq_lock(p); 7154 7155 normalize_task(rq, p); 7156 7157 __task_rq_unlock(rq); 7158 raw_spin_unlock(&p->pi_lock); 7159 } while_each_thread(g, p); 7160 7161 read_unlock_irqrestore(&tasklist_lock, flags); 7162} 7163 7164#endif /* CONFIG_MAGIC_SYSRQ */ 7165 7166#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) 7167/* 7168 * These functions are only useful for the IA64 MCA handling, or kdb. 7169 * 7170 * They can only be called when the whole system has been 7171 * stopped - every CPU needs to be quiescent, and no scheduling 7172 * activity can take place. Using them for anything else would 7173 * be a serious bug, and as a result, they aren't even visible 7174 * under any other configuration. 7175 */ 7176 7177/** 7178 * curr_task - return the current task for a given cpu. 7179 * @cpu: the processor in question. 7180 * 7181 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 7182 * 7183 * Return: The current task for @cpu. 7184 */ 7185struct task_struct *curr_task(int cpu) 7186{ 7187 return cpu_curr(cpu); 7188} 7189 7190#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ 7191 7192#ifdef CONFIG_IA64 7193/** 7194 * set_curr_task - set the current task for a given cpu. 7195 * @cpu: the processor in question. 7196 * @p: the task pointer to set. 7197 * 7198 * Description: This function must only be used when non-maskable interrupts 7199 * are serviced on a separate stack. It allows the architecture to switch the 7200 * notion of the current task on a cpu in a non-blocking manner. This function 7201 * must be called with all CPU's synchronized, and interrupts disabled, the 7202 * and caller must save the original value of the current task (see 7203 * curr_task() above) and restore that value before reenabling interrupts and 7204 * re-starting the system. 7205 * 7206 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 7207 */ 7208void set_curr_task(int cpu, struct task_struct *p) 7209{ 7210 cpu_curr(cpu) = p; 7211} 7212 7213#endif 7214 7215#ifdef CONFIG_CGROUP_SCHED 7216/* task_group_lock serializes the addition/removal of task groups */ 7217static DEFINE_SPINLOCK(task_group_lock); 7218 7219static void free_sched_group(struct task_group *tg) 7220{ 7221 free_fair_sched_group(tg); 7222 free_rt_sched_group(tg); 7223 autogroup_free(tg); 7224 kfree(tg); 7225} 7226 7227/* allocate runqueue etc for a new task group */ 7228struct task_group *sched_create_group(struct task_group *parent) 7229{ 7230 struct task_group *tg; 7231 7232 tg = kzalloc(sizeof(*tg), GFP_KERNEL); 7233 if (!tg) 7234 return ERR_PTR(-ENOMEM); 7235 7236 if (!alloc_fair_sched_group(tg, parent)) 7237 goto err; 7238 7239 if (!alloc_rt_sched_group(tg, parent)) 7240 goto err; 7241 7242 return tg; 7243 7244err: 7245 free_sched_group(tg); 7246 return ERR_PTR(-ENOMEM); 7247} 7248 7249void sched_online_group(struct task_group *tg, struct task_group *parent) 7250{ 7251 unsigned long flags; 7252 7253 spin_lock_irqsave(&task_group_lock, flags); 7254 list_add_rcu(&tg->list, &task_groups); 7255 7256 WARN_ON(!parent); /* root should already exist */ 7257 7258 tg->parent = parent; 7259 INIT_LIST_HEAD(&tg->children); 7260 list_add_rcu(&tg->siblings, &parent->children); 7261 spin_unlock_irqrestore(&task_group_lock, flags); 7262} 7263 7264/* rcu callback to free various structures associated with a task group */ 7265static void free_sched_group_rcu(struct rcu_head *rhp) 7266{ 7267 /* now it should be safe to free those cfs_rqs */ 7268 free_sched_group(container_of(rhp, struct task_group, rcu)); 7269} 7270 7271/* Destroy runqueue etc associated with a task group */ 7272void sched_destroy_group(struct task_group *tg) 7273{ 7274 /* wait for possible concurrent references to cfs_rqs complete */ 7275 call_rcu(&tg->rcu, free_sched_group_rcu); 7276} 7277 7278void sched_offline_group(struct task_group *tg) 7279{ 7280 unsigned long flags; 7281 int i; 7282 7283 /* end participation in shares distribution */ 7284 for_each_possible_cpu(i) 7285 unregister_fair_sched_group(tg, i); 7286 7287 spin_lock_irqsave(&task_group_lock, flags); 7288 list_del_rcu(&tg->list); 7289 list_del_rcu(&tg->siblings); 7290 spin_unlock_irqrestore(&task_group_lock, flags); 7291} 7292 7293/* change task's runqueue when it moves between groups. 7294 * The caller of this function should have put the task in its new group 7295 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to 7296 * reflect its new group. 7297 */ 7298void sched_move_task(struct task_struct *tsk) 7299{ 7300 struct task_group *tg; 7301 int on_rq, running; 7302 unsigned long flags; 7303 struct rq *rq; 7304 7305 rq = task_rq_lock(tsk, &flags); 7306 7307 running = task_current(rq, tsk); 7308 on_rq = tsk->on_rq; 7309 7310 if (on_rq) 7311 dequeue_task(rq, tsk, 0); 7312 if (unlikely(running)) 7313 tsk->sched_class->put_prev_task(rq, tsk); 7314 7315 tg = container_of(task_css_check(tsk, cpu_cgrp_id, 7316 lockdep_is_held(&tsk->sighand->siglock)), 7317 struct task_group, css); 7318 tg = autogroup_task_group(tsk, tg); 7319 tsk->sched_task_group = tg; 7320 7321#ifdef CONFIG_FAIR_GROUP_SCHED 7322 if (tsk->sched_class->task_move_group) 7323 tsk->sched_class->task_move_group(tsk, on_rq); 7324 else 7325#endif 7326 set_task_rq(tsk, task_cpu(tsk)); 7327 7328 if (unlikely(running)) 7329 tsk->sched_class->set_curr_task(rq); 7330 if (on_rq) 7331 enqueue_task(rq, tsk, 0); 7332 7333 task_rq_unlock(rq, tsk, &flags); 7334} 7335#endif /* CONFIG_CGROUP_SCHED */ 7336 7337#ifdef CONFIG_RT_GROUP_SCHED 7338/* 7339 * Ensure that the real time constraints are schedulable. 7340 */ 7341static DEFINE_MUTEX(rt_constraints_mutex); 7342 7343/* Must be called with tasklist_lock held */ 7344static inline int tg_has_rt_tasks(struct task_group *tg) 7345{ 7346 struct task_struct *g, *p; 7347 7348 do_each_thread(g, p) { 7349 if (rt_task(p) && task_rq(p)->rt.tg == tg) 7350 return 1; 7351 } while_each_thread(g, p); 7352 7353 return 0; 7354} 7355 7356struct rt_schedulable_data { 7357 struct task_group *tg; 7358 u64 rt_period; 7359 u64 rt_runtime; 7360}; 7361 7362static int tg_rt_schedulable(struct task_group *tg, void *data) 7363{ 7364 struct rt_schedulable_data *d = data; 7365 struct task_group *child; 7366 unsigned long total, sum = 0; 7367 u64 period, runtime; 7368 7369 period = ktime_to_ns(tg->rt_bandwidth.rt_period); 7370 runtime = tg->rt_bandwidth.rt_runtime; 7371 7372 if (tg == d->tg) { 7373 period = d->rt_period; 7374 runtime = d->rt_runtime; 7375 } 7376 7377 /* 7378 * Cannot have more runtime than the period. 7379 */ 7380 if (runtime > period && runtime != RUNTIME_INF) 7381 return -EINVAL; 7382 7383 /* 7384 * Ensure we don't starve existing RT tasks. 7385 */ 7386 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) 7387 return -EBUSY; 7388 7389 total = to_ratio(period, runtime); 7390 7391 /* 7392 * Nobody can have more than the global setting allows. 7393 */ 7394 if (total > to_ratio(global_rt_period(), global_rt_runtime())) 7395 return -EINVAL; 7396 7397 /* 7398 * The sum of our children's runtime should not exceed our own. 7399 */ 7400 list_for_each_entry_rcu(child, &tg->children, siblings) { 7401 period = ktime_to_ns(child->rt_bandwidth.rt_period); 7402 runtime = child->rt_bandwidth.rt_runtime; 7403 7404 if (child == d->tg) { 7405 period = d->rt_period; 7406 runtime = d->rt_runtime; 7407 } 7408 7409 sum += to_ratio(period, runtime); 7410 } 7411 7412 if (sum > total) 7413 return -EINVAL; 7414 7415 return 0; 7416} 7417 7418static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) 7419{ 7420 int ret; 7421 7422 struct rt_schedulable_data data = { 7423 .tg = tg, 7424 .rt_period = period, 7425 .rt_runtime = runtime, 7426 }; 7427 7428 rcu_read_lock(); 7429 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data); 7430 rcu_read_unlock(); 7431 7432 return ret; 7433} 7434 7435static int tg_set_rt_bandwidth(struct task_group *tg, 7436 u64 rt_period, u64 rt_runtime) 7437{ 7438 int i, err = 0; 7439 7440 mutex_lock(&rt_constraints_mutex); 7441 read_lock(&tasklist_lock); 7442 err = __rt_schedulable(tg, rt_period, rt_runtime); 7443 if (err) 7444 goto unlock; 7445 7446 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 7447 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); 7448 tg->rt_bandwidth.rt_runtime = rt_runtime; 7449 7450 for_each_possible_cpu(i) { 7451 struct rt_rq *rt_rq = tg->rt_rq[i]; 7452 7453 raw_spin_lock(&rt_rq->rt_runtime_lock); 7454 rt_rq->rt_runtime = rt_runtime; 7455 raw_spin_unlock(&rt_rq->rt_runtime_lock); 7456 } 7457 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); 7458unlock: 7459 read_unlock(&tasklist_lock); 7460 mutex_unlock(&rt_constraints_mutex); 7461 7462 return err; 7463} 7464 7465static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) 7466{ 7467 u64 rt_runtime, rt_period; 7468 7469 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); 7470 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; 7471 if (rt_runtime_us < 0) 7472 rt_runtime = RUNTIME_INF; 7473 7474 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 7475} 7476 7477static long sched_group_rt_runtime(struct task_group *tg) 7478{ 7479 u64 rt_runtime_us; 7480 7481 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) 7482 return -1; 7483 7484 rt_runtime_us = tg->rt_bandwidth.rt_runtime; 7485 do_div(rt_runtime_us, NSEC_PER_USEC); 7486 return rt_runtime_us; 7487} 7488 7489static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us) 7490{ 7491 u64 rt_runtime, rt_period; 7492 7493 rt_period = (u64)rt_period_us * NSEC_PER_USEC; 7494 rt_runtime = tg->rt_bandwidth.rt_runtime; 7495 7496 if (rt_period == 0) 7497 return -EINVAL; 7498 7499 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 7500} 7501 7502static long sched_group_rt_period(struct task_group *tg) 7503{ 7504 u64 rt_period_us; 7505 7506 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); 7507 do_div(rt_period_us, NSEC_PER_USEC); 7508 return rt_period_us; 7509} 7510#endif /* CONFIG_RT_GROUP_SCHED */ 7511 7512#ifdef CONFIG_RT_GROUP_SCHED 7513static int sched_rt_global_constraints(void) 7514{ 7515 int ret = 0; 7516 7517 mutex_lock(&rt_constraints_mutex); 7518 read_lock(&tasklist_lock); 7519 ret = __rt_schedulable(NULL, 0, 0); 7520 read_unlock(&tasklist_lock); 7521 mutex_unlock(&rt_constraints_mutex); 7522 7523 return ret; 7524} 7525 7526static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) 7527{ 7528 /* Don't accept realtime tasks when there is no way for them to run */ 7529 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) 7530 return 0; 7531 7532 return 1; 7533} 7534 7535#else /* !CONFIG_RT_GROUP_SCHED */ 7536static int sched_rt_global_constraints(void) 7537{ 7538 unsigned long flags; 7539 int i, ret = 0; 7540 7541 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 7542 for_each_possible_cpu(i) { 7543 struct rt_rq *rt_rq = &cpu_rq(i)->rt; 7544 7545 raw_spin_lock(&rt_rq->rt_runtime_lock); 7546 rt_rq->rt_runtime = global_rt_runtime(); 7547 raw_spin_unlock(&rt_rq->rt_runtime_lock); 7548 } 7549 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 7550 7551 return ret; 7552} 7553#endif /* CONFIG_RT_GROUP_SCHED */ 7554 7555static int sched_dl_global_constraints(void) 7556{ 7557 u64 runtime = global_rt_runtime(); 7558 u64 period = global_rt_period(); 7559 u64 new_bw = to_ratio(period, runtime); 7560 int cpu, ret = 0; 7561 unsigned long flags; 7562 7563 /* 7564 * Here we want to check the bandwidth not being set to some 7565 * value smaller than the currently allocated bandwidth in 7566 * any of the root_domains. 7567 * 7568 * FIXME: Cycling on all the CPUs is overdoing, but simpler than 7569 * cycling on root_domains... Discussion on different/better 7570 * solutions is welcome! 7571 */ 7572 for_each_possible_cpu(cpu) { 7573 struct dl_bw *dl_b = dl_bw_of(cpu); 7574 7575 raw_spin_lock_irqsave(&dl_b->lock, flags); 7576 if (new_bw < dl_b->total_bw) 7577 ret = -EBUSY; 7578 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 7579 7580 if (ret) 7581 break; 7582 } 7583 7584 return ret; 7585} 7586 7587static void sched_dl_do_global(void) 7588{ 7589 u64 new_bw = -1; 7590 int cpu; 7591 unsigned long flags; 7592 7593 def_dl_bandwidth.dl_period = global_rt_period(); 7594 def_dl_bandwidth.dl_runtime = global_rt_runtime(); 7595 7596 if (global_rt_runtime() != RUNTIME_INF) 7597 new_bw = to_ratio(global_rt_period(), global_rt_runtime()); 7598 7599 /* 7600 * FIXME: As above... 7601 */ 7602 for_each_possible_cpu(cpu) { 7603 struct dl_bw *dl_b = dl_bw_of(cpu); 7604 7605 raw_spin_lock_irqsave(&dl_b->lock, flags); 7606 dl_b->bw = new_bw; 7607 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 7608 } 7609} 7610 7611static int sched_rt_global_validate(void) 7612{ 7613 if (sysctl_sched_rt_period <= 0) 7614 return -EINVAL; 7615 7616 if ((sysctl_sched_rt_runtime != RUNTIME_INF) && 7617 (sysctl_sched_rt_runtime > sysctl_sched_rt_period)) 7618 return -EINVAL; 7619 7620 return 0; 7621} 7622 7623static void sched_rt_do_global(void) 7624{ 7625 def_rt_bandwidth.rt_runtime = global_rt_runtime(); 7626 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period()); 7627} 7628 7629int sched_rt_handler(struct ctl_table *table, int write, 7630 void __user *buffer, size_t *lenp, 7631 loff_t *ppos) 7632{ 7633 int old_period, old_runtime; 7634 static DEFINE_MUTEX(mutex); 7635 int ret; 7636 7637 mutex_lock(&mutex); 7638 old_period = sysctl_sched_rt_period; 7639 old_runtime = sysctl_sched_rt_runtime; 7640 7641 ret = proc_dointvec(table, write, buffer, lenp, ppos); 7642 7643 if (!ret && write) { 7644 ret = sched_rt_global_validate(); 7645 if (ret) 7646 goto undo; 7647 7648 ret = sched_rt_global_constraints(); 7649 if (ret) 7650 goto undo; 7651 7652 ret = sched_dl_global_constraints(); 7653 if (ret) 7654 goto undo; 7655 7656 sched_rt_do_global(); 7657 sched_dl_do_global(); 7658 } 7659 if (0) { 7660undo: 7661 sysctl_sched_rt_period = old_period; 7662 sysctl_sched_rt_runtime = old_runtime; 7663 } 7664 mutex_unlock(&mutex); 7665 7666 return ret; 7667} 7668 7669int sched_rr_handler(struct ctl_table *table, int write, 7670 void __user *buffer, size_t *lenp, 7671 loff_t *ppos) 7672{ 7673 int ret; 7674 static DEFINE_MUTEX(mutex); 7675 7676 mutex_lock(&mutex); 7677 ret = proc_dointvec(table, write, buffer, lenp, ppos); 7678 /* make sure that internally we keep jiffies */ 7679 /* also, writing zero resets timeslice to default */ 7680 if (!ret && write) { 7681 sched_rr_timeslice = sched_rr_timeslice <= 0 ? 7682 RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice); 7683 } 7684 mutex_unlock(&mutex); 7685 return ret; 7686} 7687 7688#ifdef CONFIG_CGROUP_SCHED 7689 7690static inline struct task_group *css_tg(struct cgroup_subsys_state *css) 7691{ 7692 return css ? container_of(css, struct task_group, css) : NULL; 7693} 7694 7695static struct cgroup_subsys_state * 7696cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 7697{ 7698 struct task_group *parent = css_tg(parent_css); 7699 struct task_group *tg; 7700 7701 if (!parent) { 7702 /* This is early initialization for the top cgroup */ 7703 return &root_task_group.css; 7704 } 7705 7706 tg = sched_create_group(parent); 7707 if (IS_ERR(tg)) 7708 return ERR_PTR(-ENOMEM); 7709 7710 return &tg->css; 7711} 7712 7713static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 7714{ 7715 struct task_group *tg = css_tg(css); 7716 struct task_group *parent = css_tg(css->parent); 7717 7718 if (parent) 7719 sched_online_group(tg, parent); 7720 return 0; 7721} 7722 7723static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 7724{ 7725 struct task_group *tg = css_tg(css); 7726 7727 sched_destroy_group(tg); 7728} 7729 7730static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css) 7731{ 7732 struct task_group *tg = css_tg(css); 7733 7734 sched_offline_group(tg); 7735} 7736 7737static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css, 7738 struct cgroup_taskset *tset) 7739{ 7740 struct task_struct *task; 7741 7742 cgroup_taskset_for_each(task, tset) { 7743#ifdef CONFIG_RT_GROUP_SCHED 7744 if (!sched_rt_can_attach(css_tg(css), task)) 7745 return -EINVAL; 7746#else 7747 /* We don't support RT-tasks being in separate groups */ 7748 if (task->sched_class != &fair_sched_class) 7749 return -EINVAL; 7750#endif 7751 } 7752 return 0; 7753} 7754 7755static void cpu_cgroup_attach(struct cgroup_subsys_state *css, 7756 struct cgroup_taskset *tset) 7757{ 7758 struct task_struct *task; 7759 7760 cgroup_taskset_for_each(task, tset) 7761 sched_move_task(task); 7762} 7763 7764static void cpu_cgroup_exit(struct cgroup_subsys_state *css, 7765 struct cgroup_subsys_state *old_css, 7766 struct task_struct *task) 7767{ 7768 /* 7769 * cgroup_exit() is called in the copy_process() failure path. 7770 * Ignore this case since the task hasn't ran yet, this avoids 7771 * trying to poke a half freed task state from generic code. 7772 */ 7773 if (!(task->flags & PF_EXITING)) 7774 return; 7775 7776 sched_move_task(task); 7777} 7778 7779#ifdef CONFIG_FAIR_GROUP_SCHED 7780static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 7781 struct cftype *cftype, u64 shareval) 7782{ 7783 return sched_group_set_shares(css_tg(css), scale_load(shareval)); 7784} 7785 7786static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, 7787 struct cftype *cft) 7788{ 7789 struct task_group *tg = css_tg(css); 7790 7791 return (u64) scale_load_down(tg->shares); 7792} 7793 7794#ifdef CONFIG_CFS_BANDWIDTH 7795static DEFINE_MUTEX(cfs_constraints_mutex); 7796 7797const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ 7798const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ 7799 7800static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); 7801 7802static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) 7803{ 7804 int i, ret = 0, runtime_enabled, runtime_was_enabled; 7805 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7806 7807 if (tg == &root_task_group) 7808 return -EINVAL; 7809 7810 /* 7811 * Ensure we have at some amount of bandwidth every period. This is 7812 * to prevent reaching a state of large arrears when throttled via 7813 * entity_tick() resulting in prolonged exit starvation. 7814 */ 7815 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) 7816 return -EINVAL; 7817 7818 /* 7819 * Likewise, bound things on the otherside by preventing insane quota 7820 * periods. This also allows us to normalize in computing quota 7821 * feasibility. 7822 */ 7823 if (period > max_cfs_quota_period) 7824 return -EINVAL; 7825 7826 /* 7827 * Prevent race between setting of cfs_rq->runtime_enabled and 7828 * unthrottle_offline_cfs_rqs(). 7829 */ 7830 get_online_cpus(); 7831 mutex_lock(&cfs_constraints_mutex); 7832 ret = __cfs_schedulable(tg, period, quota); 7833 if (ret) 7834 goto out_unlock; 7835 7836 runtime_enabled = quota != RUNTIME_INF; 7837 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 7838 /* 7839 * If we need to toggle cfs_bandwidth_used, off->on must occur 7840 * before making related changes, and on->off must occur afterwards 7841 */ 7842 if (runtime_enabled && !runtime_was_enabled) 7843 cfs_bandwidth_usage_inc(); 7844 raw_spin_lock_irq(&cfs_b->lock); 7845 cfs_b->period = ns_to_ktime(period); 7846 cfs_b->quota = quota; 7847 7848 __refill_cfs_bandwidth_runtime(cfs_b); 7849 /* restart the period timer (if active) to handle new period expiry */ 7850 if (runtime_enabled && cfs_b->timer_active) { 7851 /* force a reprogram */ 7852 __start_cfs_bandwidth(cfs_b, true); 7853 } 7854 raw_spin_unlock_irq(&cfs_b->lock); 7855 7856 for_each_online_cpu(i) { 7857 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 7858 struct rq *rq = cfs_rq->rq; 7859 7860 raw_spin_lock_irq(&rq->lock); 7861 cfs_rq->runtime_enabled = runtime_enabled; 7862 cfs_rq->runtime_remaining = 0; 7863 7864 if (cfs_rq->throttled) 7865 unthrottle_cfs_rq(cfs_rq); 7866 raw_spin_unlock_irq(&rq->lock); 7867 } 7868 if (runtime_was_enabled && !runtime_enabled) 7869 cfs_bandwidth_usage_dec(); 7870out_unlock: 7871 mutex_unlock(&cfs_constraints_mutex); 7872 put_online_cpus(); 7873 7874 return ret; 7875} 7876 7877int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 7878{ 7879 u64 quota, period; 7880 7881 period = ktime_to_ns(tg->cfs_bandwidth.period); 7882 if (cfs_quota_us < 0) 7883 quota = RUNTIME_INF; 7884 else 7885 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 7886 7887 return tg_set_cfs_bandwidth(tg, period, quota); 7888} 7889 7890long tg_get_cfs_quota(struct task_group *tg) 7891{ 7892 u64 quota_us; 7893 7894 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 7895 return -1; 7896 7897 quota_us = tg->cfs_bandwidth.quota; 7898 do_div(quota_us, NSEC_PER_USEC); 7899 7900 return quota_us; 7901} 7902 7903int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 7904{ 7905 u64 quota, period; 7906 7907 period = (u64)cfs_period_us * NSEC_PER_USEC; 7908 quota = tg->cfs_bandwidth.quota; 7909 7910 return tg_set_cfs_bandwidth(tg, period, quota); 7911} 7912 7913long tg_get_cfs_period(struct task_group *tg) 7914{ 7915 u64 cfs_period_us; 7916 7917 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 7918 do_div(cfs_period_us, NSEC_PER_USEC); 7919 7920 return cfs_period_us; 7921} 7922 7923static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 7924 struct cftype *cft) 7925{ 7926 return tg_get_cfs_quota(css_tg(css)); 7927} 7928 7929static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 7930 struct cftype *cftype, s64 cfs_quota_us) 7931{ 7932 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 7933} 7934 7935static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, 7936 struct cftype *cft) 7937{ 7938 return tg_get_cfs_period(css_tg(css)); 7939} 7940 7941static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, 7942 struct cftype *cftype, u64 cfs_period_us) 7943{ 7944 return tg_set_cfs_period(css_tg(css), cfs_period_us); 7945} 7946 7947struct cfs_schedulable_data { 7948 struct task_group *tg; 7949 u64 period, quota; 7950}; 7951 7952/* 7953 * normalize group quota/period to be quota/max_period 7954 * note: units are usecs 7955 */ 7956static u64 normalize_cfs_quota(struct task_group *tg, 7957 struct cfs_schedulable_data *d) 7958{ 7959 u64 quota, period; 7960 7961 if (tg == d->tg) { 7962 period = d->period; 7963 quota = d->quota; 7964 } else { 7965 period = tg_get_cfs_period(tg); 7966 quota = tg_get_cfs_quota(tg); 7967 } 7968 7969 /* note: these should typically be equivalent */ 7970 if (quota == RUNTIME_INF || quota == -1) 7971 return RUNTIME_INF; 7972 7973 return to_ratio(period, quota); 7974} 7975 7976static int tg_cfs_schedulable_down(struct task_group *tg, void *data) 7977{ 7978 struct cfs_schedulable_data *d = data; 7979 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7980 s64 quota = 0, parent_quota = -1; 7981 7982 if (!tg->parent) { 7983 quota = RUNTIME_INF; 7984 } else { 7985 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; 7986 7987 quota = normalize_cfs_quota(tg, d); 7988 parent_quota = parent_b->hierarchal_quota; 7989 7990 /* 7991 * ensure max(child_quota) <= parent_quota, inherit when no 7992 * limit is set 7993 */ 7994 if (quota == RUNTIME_INF) 7995 quota = parent_quota; 7996 else if (parent_quota != RUNTIME_INF && quota > parent_quota) 7997 return -EINVAL; 7998 } 7999 cfs_b->hierarchal_quota = quota; 8000 8001 return 0; 8002} 8003 8004static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) 8005{ 8006 int ret; 8007 struct cfs_schedulable_data data = { 8008 .tg = tg, 8009 .period = period, 8010 .quota = quota, 8011 }; 8012 8013 if (quota != RUNTIME_INF) { 8014 do_div(data.period, NSEC_PER_USEC); 8015 do_div(data.quota, NSEC_PER_USEC); 8016 } 8017 8018 rcu_read_lock(); 8019 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); 8020 rcu_read_unlock(); 8021 8022 return ret; 8023} 8024 8025static int cpu_stats_show(struct seq_file *sf, void *v) 8026{ 8027 struct task_group *tg = css_tg(seq_css(sf)); 8028 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 8029 8030 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); 8031 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); 8032 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); 8033 8034 return 0; 8035} 8036#endif /* CONFIG_CFS_BANDWIDTH */ 8037#endif /* CONFIG_FAIR_GROUP_SCHED */ 8038 8039#ifdef CONFIG_RT_GROUP_SCHED 8040static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, 8041 struct cftype *cft, s64 val) 8042{ 8043 return sched_group_set_rt_runtime(css_tg(css), val); 8044} 8045 8046static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, 8047 struct cftype *cft) 8048{ 8049 return sched_group_rt_runtime(css_tg(css)); 8050} 8051 8052static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, 8053 struct cftype *cftype, u64 rt_period_us) 8054{ 8055 return sched_group_set_rt_period(css_tg(css), rt_period_us); 8056} 8057 8058static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, 8059 struct cftype *cft) 8060{ 8061 return sched_group_rt_period(css_tg(css)); 8062} 8063#endif /* CONFIG_RT_GROUP_SCHED */ 8064 8065static struct cftype cpu_files[] = { 8066#ifdef CONFIG_FAIR_GROUP_SCHED 8067 { 8068 .name = "shares", 8069 .read_u64 = cpu_shares_read_u64, 8070 .write_u64 = cpu_shares_write_u64, 8071 }, 8072#endif 8073#ifdef CONFIG_CFS_BANDWIDTH 8074 { 8075 .name = "cfs_quota_us", 8076 .read_s64 = cpu_cfs_quota_read_s64, 8077 .write_s64 = cpu_cfs_quota_write_s64, 8078 }, 8079 { 8080 .name = "cfs_period_us", 8081 .read_u64 = cpu_cfs_period_read_u64, 8082 .write_u64 = cpu_cfs_period_write_u64, 8083 }, 8084 { 8085 .name = "stat", 8086 .seq_show = cpu_stats_show, 8087 }, 8088#endif 8089#ifdef CONFIG_RT_GROUP_SCHED 8090 { 8091 .name = "rt_runtime_us", 8092 .read_s64 = cpu_rt_runtime_read, 8093 .write_s64 = cpu_rt_runtime_write, 8094 }, 8095 { 8096 .name = "rt_period_us", 8097 .read_u64 = cpu_rt_period_read_uint, 8098 .write_u64 = cpu_rt_period_write_uint, 8099 }, 8100#endif 8101 { } /* terminate */ 8102}; 8103 8104struct cgroup_subsys cpu_cgrp_subsys = { 8105 .css_alloc = cpu_cgroup_css_alloc, 8106 .css_free = cpu_cgroup_css_free, 8107 .css_online = cpu_cgroup_css_online, 8108 .css_offline = cpu_cgroup_css_offline, 8109 .can_attach = cpu_cgroup_can_attach, 8110 .attach = cpu_cgroup_attach, 8111 .exit = cpu_cgroup_exit, 8112 .base_cftypes = cpu_files, 8113 .early_init = 1, 8114}; 8115 8116#endif /* CONFIG_CGROUP_SCHED */ 8117 8118void dump_cpu_task(int cpu) 8119{ 8120 pr_info("Task dump for CPU %d:\n", cpu); 8121 sched_show_task(cpu_curr(cpu)); 8122} 8123