core.c revision 1292531f6f27af909e713671dd9cc3bcab8114b7
1/* 2 * kernel/sched/core.c 3 * 4 * Kernel scheduler and related syscalls 5 * 6 * Copyright (C) 1991-2002 Linus Torvalds 7 * 8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and 9 * make semaphores SMP safe 10 * 1998-11-19 Implemented schedule_timeout() and related stuff 11 * by Andrea Arcangeli 12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: 13 * hybrid priority-list and round-robin design with 14 * an array-switch method of distributing timeslices 15 * and per-CPU runqueues. Cleanups and useful suggestions 16 * by Davide Libenzi, preemptible kernel bits by Robert Love. 17 * 2003-09-03 Interactivity tuning by Con Kolivas. 18 * 2004-04-02 Scheduler domains code by Nick Piggin 19 * 2007-04-15 Work begun on replacing all interactivity tuning with a 20 * fair scheduling design by Con Kolivas. 21 * 2007-05-05 Load balancing (smp-nice) and other improvements 22 * by Peter Williams 23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith 24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri 25 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins, 26 * Thomas Gleixner, Mike Kravetz 27 */ 28 29#include <linux/mm.h> 30#include <linux/module.h> 31#include <linux/nmi.h> 32#include <linux/init.h> 33#include <linux/uaccess.h> 34#include <linux/highmem.h> 35#include <asm/mmu_context.h> 36#include <linux/interrupt.h> 37#include <linux/capability.h> 38#include <linux/completion.h> 39#include <linux/kernel_stat.h> 40#include <linux/debug_locks.h> 41#include <linux/perf_event.h> 42#include <linux/security.h> 43#include <linux/notifier.h> 44#include <linux/profile.h> 45#include <linux/freezer.h> 46#include <linux/vmalloc.h> 47#include <linux/blkdev.h> 48#include <linux/delay.h> 49#include <linux/pid_namespace.h> 50#include <linux/smp.h> 51#include <linux/threads.h> 52#include <linux/timer.h> 53#include <linux/rcupdate.h> 54#include <linux/cpu.h> 55#include <linux/cpuset.h> 56#include <linux/percpu.h> 57#include <linux/proc_fs.h> 58#include <linux/seq_file.h> 59#include <linux/sysctl.h> 60#include <linux/syscalls.h> 61#include <linux/times.h> 62#include <linux/tsacct_kern.h> 63#include <linux/kprobes.h> 64#include <linux/delayacct.h> 65#include <linux/unistd.h> 66#include <linux/pagemap.h> 67#include <linux/hrtimer.h> 68#include <linux/tick.h> 69#include <linux/debugfs.h> 70#include <linux/ctype.h> 71#include <linux/ftrace.h> 72#include <linux/slab.h> 73#include <linux/init_task.h> 74#include <linux/binfmts.h> 75 76#include <asm/switch_to.h> 77#include <asm/tlb.h> 78#include <asm/irq_regs.h> 79#include <asm/mutex.h> 80#ifdef CONFIG_PARAVIRT 81#include <asm/paravirt.h> 82#endif 83 84#include "sched.h" 85#include "../workqueue_sched.h" 86#include "../smpboot.h" 87 88#define CREATE_TRACE_POINTS 89#include <trace/events/sched.h> 90 91void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period) 92{ 93 unsigned long delta; 94 ktime_t soft, hard, now; 95 96 for (;;) { 97 if (hrtimer_active(period_timer)) 98 break; 99 100 now = hrtimer_cb_get_time(period_timer); 101 hrtimer_forward(period_timer, now, period); 102 103 soft = hrtimer_get_softexpires(period_timer); 104 hard = hrtimer_get_expires(period_timer); 105 delta = ktime_to_ns(ktime_sub(hard, soft)); 106 __hrtimer_start_range_ns(period_timer, soft, delta, 107 HRTIMER_MODE_ABS_PINNED, 0); 108 } 109} 110 111DEFINE_MUTEX(sched_domains_mutex); 112DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 113 114static void update_rq_clock_task(struct rq *rq, s64 delta); 115 116void 
update_rq_clock(struct rq *rq) 117{ 118 s64 delta; 119 120 if (rq->skip_clock_update > 0) 121 return; 122 123 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; 124 rq->clock += delta; 125 update_rq_clock_task(rq, delta); 126} 127 128/* 129 * Debugging: various feature bits 130 */ 131 132#define SCHED_FEAT(name, enabled) \ 133 (1UL << __SCHED_FEAT_##name) * enabled | 134 135const_debug unsigned int sysctl_sched_features = 136#include "features.h" 137 0; 138 139#undef SCHED_FEAT 140 141#ifdef CONFIG_SCHED_DEBUG 142#define SCHED_FEAT(name, enabled) \ 143 #name , 144 145static const char * const sched_feat_names[] = { 146#include "features.h" 147 NULL 148}; 149 150#undef SCHED_FEAT 151 152static int sched_feat_show(struct seq_file *m, void *v) 153{ 154 int i; 155 156 for (i = 0; i < __SCHED_FEAT_NR; i++) { 157 if (!(sysctl_sched_features & (1UL << i))) 158 seq_puts(m, "NO_"); 159 seq_printf(m, "%s ", sched_feat_names[i]); 160 } 161 seq_puts(m, "\n"); 162 163 return 0; 164} 165 166#ifdef HAVE_JUMP_LABEL 167 168#define jump_label_key__true STATIC_KEY_INIT_TRUE 169#define jump_label_key__false STATIC_KEY_INIT_FALSE 170 171#define SCHED_FEAT(name, enabled) \ 172 jump_label_key__##enabled , 173 174struct static_key sched_feat_keys[__SCHED_FEAT_NR] = { 175#include "features.h" 176}; 177 178#undef SCHED_FEAT 179 180static void sched_feat_disable(int i) 181{ 182 if (static_key_enabled(&sched_feat_keys[i])) 183 static_key_slow_dec(&sched_feat_keys[i]); 184} 185 186static void sched_feat_enable(int i) 187{ 188 if (!static_key_enabled(&sched_feat_keys[i])) 189 static_key_slow_inc(&sched_feat_keys[i]); 190} 191#else 192static void sched_feat_disable(int i) { }; 193static void sched_feat_enable(int i) { }; 194#endif /* HAVE_JUMP_LABEL */ 195 196static ssize_t 197sched_feat_write(struct file *filp, const char __user *ubuf, 198 size_t cnt, loff_t *ppos) 199{ 200 char buf[64]; 201 char *cmp; 202 int neg = 0; 203 int i; 204 205 if (cnt > 63) 206 cnt = 63; 207 208 if (copy_from_user(&buf, ubuf, cnt)) 209 return -EFAULT; 210 211 buf[cnt] = 0; 212 cmp = strstrip(buf); 213 214 if (strncmp(cmp, "NO_", 3) == 0) { 215 neg = 1; 216 cmp += 3; 217 } 218 219 for (i = 0; i < __SCHED_FEAT_NR; i++) { 220 if (strcmp(cmp, sched_feat_names[i]) == 0) { 221 if (neg) { 222 sysctl_sched_features &= ~(1UL << i); 223 sched_feat_disable(i); 224 } else { 225 sysctl_sched_features |= (1UL << i); 226 sched_feat_enable(i); 227 } 228 break; 229 } 230 } 231 232 if (i == __SCHED_FEAT_NR) 233 return -EINVAL; 234 235 *ppos += cnt; 236 237 return cnt; 238} 239 240static int sched_feat_open(struct inode *inode, struct file *filp) 241{ 242 return single_open(filp, sched_feat_show, NULL); 243} 244 245static const struct file_operations sched_feat_fops = { 246 .open = sched_feat_open, 247 .write = sched_feat_write, 248 .read = seq_read, 249 .llseek = seq_lseek, 250 .release = single_release, 251}; 252 253static __init int sched_init_debug(void) 254{ 255 debugfs_create_file("sched_features", 0644, NULL, NULL, 256 &sched_feat_fops); 257 258 return 0; 259} 260late_initcall(sched_init_debug); 261#endif /* CONFIG_SCHED_DEBUG */ 262 263/* 264 * Number of tasks to iterate in a single balance run. 265 * Limited because this is done with IRQs disabled. 266 */ 267const_debug unsigned int sysctl_sched_nr_migrate = 32; 268 269/* 270 * period over which we average the RT time consumption, measured 271 * in ms. 
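 *
 * sched_avg_update() consumes the period derived from this value: each
 * time a full period elapses it halves rq->rt_avg, so the RT time we
 * track decays away exponentially rather than being averaged over a
 * fixed window.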
272 * 273 * default: 1s 274 */ 275const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC; 276 277/* 278 * period over which we measure -rt task cpu usage in us. 279 * default: 1s 280 */ 281unsigned int sysctl_sched_rt_period = 1000000; 282 283__read_mostly int scheduler_running; 284 285/* 286 * part of the period that we allow rt tasks to run in us. 287 * default: 0.95s 288 */ 289int sysctl_sched_rt_runtime = 950000; 290 291 292 293/* 294 * __task_rq_lock - lock the rq @p resides on. 295 */ 296static inline struct rq *__task_rq_lock(struct task_struct *p) 297 __acquires(rq->lock) 298{ 299 struct rq *rq; 300 301 lockdep_assert_held(&p->pi_lock); 302 303 for (;;) { 304 rq = task_rq(p); 305 raw_spin_lock(&rq->lock); 306 if (likely(rq == task_rq(p))) 307 return rq; 308 raw_spin_unlock(&rq->lock); 309 } 310} 311 312/* 313 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on. 314 */ 315static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) 316 __acquires(p->pi_lock) 317 __acquires(rq->lock) 318{ 319 struct rq *rq; 320 321 for (;;) { 322 raw_spin_lock_irqsave(&p->pi_lock, *flags); 323 rq = task_rq(p); 324 raw_spin_lock(&rq->lock); 325 if (likely(rq == task_rq(p))) 326 return rq; 327 raw_spin_unlock(&rq->lock); 328 raw_spin_unlock_irqrestore(&p->pi_lock, *flags); 329 } 330} 331 332static void __task_rq_unlock(struct rq *rq) 333 __releases(rq->lock) 334{ 335 raw_spin_unlock(&rq->lock); 336} 337 338static inline void 339task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags) 340 __releases(rq->lock) 341 __releases(p->pi_lock) 342{ 343 raw_spin_unlock(&rq->lock); 344 raw_spin_unlock_irqrestore(&p->pi_lock, *flags); 345} 346 347/* 348 * this_rq_lock - lock this runqueue and disable interrupts. 349 */ 350static struct rq *this_rq_lock(void) 351 __acquires(rq->lock) 352{ 353 struct rq *rq; 354 355 local_irq_disable(); 356 rq = this_rq(); 357 raw_spin_lock(&rq->lock); 358 359 return rq; 360} 361 362#ifdef CONFIG_SCHED_HRTICK 363/* 364 * Use HR-timers to deliver accurate preemption points. 365 * 366 * Its all a bit involved since we cannot program an hrt while holding the 367 * rq->lock. So what we do is store a state in in rq->hrtick_* and ask for a 368 * reschedule event. 369 * 370 * When we get rescheduled we reprogram the hrtick_timer outside of the 371 * rq->lock. 372 */ 373 374static void hrtick_clear(struct rq *rq) 375{ 376 if (hrtimer_active(&rq->hrtick_timer)) 377 hrtimer_cancel(&rq->hrtick_timer); 378} 379 380/* 381 * High-resolution timer tick. 382 * Runs from hardirq context with interrupts disabled. 383 */ 384static enum hrtimer_restart hrtick(struct hrtimer *timer) 385{ 386 struct rq *rq = container_of(timer, struct rq, hrtick_timer); 387 388 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); 389 390 raw_spin_lock(&rq->lock); 391 update_rq_clock(rq); 392 rq->curr->sched_class->task_tick(rq, rq->curr, 1); 393 raw_spin_unlock(&rq->lock); 394 395 return HRTIMER_NORESTART; 396} 397 398#ifdef CONFIG_SMP 399/* 400 * called from hardirq (IPI) context 401 */ 402static void __hrtick_start(void *arg) 403{ 404 struct rq *rq = arg; 405 406 raw_spin_lock(&rq->lock); 407 hrtimer_restart(&rq->hrtick_timer); 408 rq->hrtick_csd_pending = 0; 409 raw_spin_unlock(&rq->lock); 410} 411 412/* 413 * Called to set the hrtick timer state. 
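 *
 * The timer must fire on the CPU that owns @rq (hrtick() warns if it
 * does not), so for a remote runqueue we do not arm it here directly:
 * we queue the per-rq call-function entry and let __hrtick_start() arm
 * the timer from the IPI on the right CPU, with hrtick_csd_pending
 * ensuring only one such request is in flight at a time.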
414 * 415 * called with rq->lock held and irqs disabled 416 */ 417void hrtick_start(struct rq *rq, u64 delay) 418{ 419 struct hrtimer *timer = &rq->hrtick_timer; 420 ktime_t time = ktime_add_ns(timer->base->get_time(), delay); 421 422 hrtimer_set_expires(timer, time); 423 424 if (rq == this_rq()) { 425 hrtimer_restart(timer); 426 } else if (!rq->hrtick_csd_pending) { 427 __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0); 428 rq->hrtick_csd_pending = 1; 429 } 430} 431 432static int 433hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) 434{ 435 int cpu = (int)(long)hcpu; 436 437 switch (action) { 438 case CPU_UP_CANCELED: 439 case CPU_UP_CANCELED_FROZEN: 440 case CPU_DOWN_PREPARE: 441 case CPU_DOWN_PREPARE_FROZEN: 442 case CPU_DEAD: 443 case CPU_DEAD_FROZEN: 444 hrtick_clear(cpu_rq(cpu)); 445 return NOTIFY_OK; 446 } 447 448 return NOTIFY_DONE; 449} 450 451static __init void init_hrtick(void) 452{ 453 hotcpu_notifier(hotplug_hrtick, 0); 454} 455#else 456/* 457 * Called to set the hrtick timer state. 458 * 459 * called with rq->lock held and irqs disabled 460 */ 461void hrtick_start(struct rq *rq, u64 delay) 462{ 463 __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0, 464 HRTIMER_MODE_REL_PINNED, 0); 465} 466 467static inline void init_hrtick(void) 468{ 469} 470#endif /* CONFIG_SMP */ 471 472static void init_rq_hrtick(struct rq *rq) 473{ 474#ifdef CONFIG_SMP 475 rq->hrtick_csd_pending = 0; 476 477 rq->hrtick_csd.flags = 0; 478 rq->hrtick_csd.func = __hrtick_start; 479 rq->hrtick_csd.info = rq; 480#endif 481 482 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 483 rq->hrtick_timer.function = hrtick; 484} 485#else /* CONFIG_SCHED_HRTICK */ 486static inline void hrtick_clear(struct rq *rq) 487{ 488} 489 490static inline void init_rq_hrtick(struct rq *rq) 491{ 492} 493 494static inline void init_hrtick(void) 495{ 496} 497#endif /* CONFIG_SCHED_HRTICK */ 498 499/* 500 * resched_task - mark a task 'to be rescheduled now'. 501 * 502 * On UP this means the setting of the need_resched flag, on SMP it 503 * might also involve a cross-CPU call to trigger the scheduler on 504 * the target CPU. 505 */ 506#ifdef CONFIG_SMP 507 508#ifndef tsk_is_polling 509#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) 510#endif 511 512void resched_task(struct task_struct *p) 513{ 514 int cpu; 515 516 assert_raw_spin_locked(&task_rq(p)->lock); 517 518 if (test_tsk_need_resched(p)) 519 return; 520 521 set_tsk_need_resched(p); 522 523 cpu = task_cpu(p); 524 if (cpu == smp_processor_id()) 525 return; 526 527 /* NEED_RESCHED must be visible before we test polling */ 528 smp_mb(); 529 if (!tsk_is_polling(p)) 530 smp_send_reschedule(cpu); 531} 532 533void resched_cpu(int cpu) 534{ 535 struct rq *rq = cpu_rq(cpu); 536 unsigned long flags; 537 538 if (!raw_spin_trylock_irqsave(&rq->lock, flags)) 539 return; 540 resched_task(cpu_curr(cpu)); 541 raw_spin_unlock_irqrestore(&rq->lock, flags); 542} 543 544#ifdef CONFIG_NO_HZ 545/* 546 * In the semi idle case, use the nearest busy cpu for migrating timers 547 * from an idle cpu. This is good for power-savings. 548 * 549 * We don't do similar optimization for completely idle system, as 550 * selecting an idle cpu will add more delays to the timers than intended 551 * (as that cpu's timer base may not be uptodate wrt jiffies etc). 
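 *
 * get_nohz_timer_target() below implements this by walking the sched
 * domains of the current CPU from the smallest span outwards and
 * returning the first non-idle CPU it finds, falling back to the local
 * CPU when everything in reach is idle.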
552 */ 553int get_nohz_timer_target(void) 554{ 555 int cpu = smp_processor_id(); 556 int i; 557 struct sched_domain *sd; 558 559 rcu_read_lock(); 560 for_each_domain(cpu, sd) { 561 for_each_cpu(i, sched_domain_span(sd)) { 562 if (!idle_cpu(i)) { 563 cpu = i; 564 goto unlock; 565 } 566 } 567 } 568unlock: 569 rcu_read_unlock(); 570 return cpu; 571} 572/* 573 * When add_timer_on() enqueues a timer into the timer wheel of an 574 * idle CPU then this timer might expire before the next timer event 575 * which is scheduled to wake up that CPU. In case of a completely 576 * idle system the next event might even be infinite time into the 577 * future. wake_up_idle_cpu() ensures that the CPU is woken up and 578 * leaves the inner idle loop so the newly added timer is taken into 579 * account when the CPU goes back to idle and evaluates the timer 580 * wheel for the next timer event. 581 */ 582void wake_up_idle_cpu(int cpu) 583{ 584 struct rq *rq = cpu_rq(cpu); 585 586 if (cpu == smp_processor_id()) 587 return; 588 589 /* 590 * This is safe, as this function is called with the timer 591 * wheel base lock of (cpu) held. When the CPU is on the way 592 * to idle and has not yet set rq->curr to idle then it will 593 * be serialized on the timer wheel base lock and take the new 594 * timer into account automatically. 595 */ 596 if (rq->curr != rq->idle) 597 return; 598 599 /* 600 * We can set TIF_RESCHED on the idle task of the other CPU 601 * lockless. The worst case is that the other CPU runs the 602 * idle task through an additional NOOP schedule() 603 */ 604 set_tsk_need_resched(rq->idle); 605 606 /* NEED_RESCHED must be visible before we test polling */ 607 smp_mb(); 608 if (!tsk_is_polling(rq->idle)) 609 smp_send_reschedule(cpu); 610} 611 612static inline bool got_nohz_idle_kick(void) 613{ 614 int cpu = smp_processor_id(); 615 return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)); 616} 617 618#else /* CONFIG_NO_HZ */ 619 620static inline bool got_nohz_idle_kick(void) 621{ 622 return false; 623} 624 625#endif /* CONFIG_NO_HZ */ 626 627void sched_avg_update(struct rq *rq) 628{ 629 s64 period = sched_avg_period(); 630 631 while ((s64)(rq->clock - rq->age_stamp) > period) { 632 /* 633 * Inline assembly required to prevent the compiler 634 * optimising this loop into a divmod call. 635 * See __iter_div_u64_rem() for another example of this. 636 */ 637 asm("" : "+rm" (rq->age_stamp)); 638 rq->age_stamp += period; 639 rq->rt_avg /= 2; 640 } 641} 642 643#else /* !CONFIG_SMP */ 644void resched_task(struct task_struct *p) 645{ 646 assert_raw_spin_locked(&task_rq(p)->lock); 647 set_tsk_need_resched(p); 648} 649#endif /* CONFIG_SMP */ 650 651#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ 652 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH))) 653/* 654 * Iterate task_group tree rooted at *from, calling @down when first entering a 655 * node and @up when leaving it for the final time. 656 * 657 * Caller must hold rcu_lock or sufficient equivalent. 
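 *
 * For illustration, with a hierarchy from -> { A -> { A1 }, B } the
 * callbacks run in the order
 *
 *   down(from), down(A), down(A1), up(A1), up(A), down(B), up(B), up(from)
 *
 * i.e. @down is a pre-order visit and @up a post-order visit; a non-zero
 * return from either callback stops the walk and is passed back to the
 * caller.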
658 */ 659int walk_tg_tree_from(struct task_group *from, 660 tg_visitor down, tg_visitor up, void *data) 661{ 662 struct task_group *parent, *child; 663 int ret; 664 665 parent = from; 666 667down: 668 ret = (*down)(parent, data); 669 if (ret) 670 goto out; 671 list_for_each_entry_rcu(child, &parent->children, siblings) { 672 parent = child; 673 goto down; 674 675up: 676 continue; 677 } 678 ret = (*up)(parent, data); 679 if (ret || parent == from) 680 goto out; 681 682 child = parent; 683 parent = parent->parent; 684 if (parent) 685 goto up; 686out: 687 return ret; 688} 689 690int tg_nop(struct task_group *tg, void *data) 691{ 692 return 0; 693} 694#endif 695 696static void set_load_weight(struct task_struct *p) 697{ 698 int prio = p->static_prio - MAX_RT_PRIO; 699 struct load_weight *load = &p->se.load; 700 701 /* 702 * SCHED_IDLE tasks get minimal weight: 703 */ 704 if (p->policy == SCHED_IDLE) { 705 load->weight = scale_load(WEIGHT_IDLEPRIO); 706 load->inv_weight = WMULT_IDLEPRIO; 707 return; 708 } 709 710 load->weight = scale_load(prio_to_weight[prio]); 711 load->inv_weight = prio_to_wmult[prio]; 712} 713 714static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) 715{ 716 update_rq_clock(rq); 717 sched_info_queued(p); 718 p->sched_class->enqueue_task(rq, p, flags); 719} 720 721static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) 722{ 723 update_rq_clock(rq); 724 sched_info_dequeued(p); 725 p->sched_class->dequeue_task(rq, p, flags); 726} 727 728void activate_task(struct rq *rq, struct task_struct *p, int flags) 729{ 730 if (task_contributes_to_load(p)) 731 rq->nr_uninterruptible--; 732 733 enqueue_task(rq, p, flags); 734} 735 736void deactivate_task(struct rq *rq, struct task_struct *p, int flags) 737{ 738 if (task_contributes_to_load(p)) 739 rq->nr_uninterruptible++; 740 741 dequeue_task(rq, p, flags); 742} 743 744#ifdef CONFIG_IRQ_TIME_ACCOUNTING 745 746/* 747 * There are no locks covering percpu hardirq/softirq time. 748 * They are only modified in account_system_vtime, on corresponding CPU 749 * with interrupts disabled. So, writes are safe. 750 * They are read and saved off onto struct rq in update_rq_clock(). 751 * This may result in other CPU reading this CPU's irq time and can 752 * race with irq/account_system_vtime on this CPU. We would either get old 753 * or new value with a side effect of accounting a slice of irq time to wrong 754 * task when irq is in progress while we read rq->clock. That is a worthy 755 * compromise in place of having locks on each irq in account_system_time. 
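 *
 * On 32-bit there is the additional problem that a remote reader can see
 * a torn 64-bit value while this CPU is mid-update; the irq_time_seq
 * sequence counter below exists purely so such readers can detect that
 * and retry, while 64-bit architectures get away with plain loads.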
756 */ 757static DEFINE_PER_CPU(u64, cpu_hardirq_time); 758static DEFINE_PER_CPU(u64, cpu_softirq_time); 759 760static DEFINE_PER_CPU(u64, irq_start_time); 761static int sched_clock_irqtime; 762 763void enable_sched_clock_irqtime(void) 764{ 765 sched_clock_irqtime = 1; 766} 767 768void disable_sched_clock_irqtime(void) 769{ 770 sched_clock_irqtime = 0; 771} 772 773#ifndef CONFIG_64BIT 774static DEFINE_PER_CPU(seqcount_t, irq_time_seq); 775 776static inline void irq_time_write_begin(void) 777{ 778 __this_cpu_inc(irq_time_seq.sequence); 779 smp_wmb(); 780} 781 782static inline void irq_time_write_end(void) 783{ 784 smp_wmb(); 785 __this_cpu_inc(irq_time_seq.sequence); 786} 787 788static inline u64 irq_time_read(int cpu) 789{ 790 u64 irq_time; 791 unsigned seq; 792 793 do { 794 seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu)); 795 irq_time = per_cpu(cpu_softirq_time, cpu) + 796 per_cpu(cpu_hardirq_time, cpu); 797 } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq)); 798 799 return irq_time; 800} 801#else /* CONFIG_64BIT */ 802static inline void irq_time_write_begin(void) 803{ 804} 805 806static inline void irq_time_write_end(void) 807{ 808} 809 810static inline u64 irq_time_read(int cpu) 811{ 812 return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); 813} 814#endif /* CONFIG_64BIT */ 815 816/* 817 * Called before incrementing preempt_count on {soft,}irq_enter 818 * and before decrementing preempt_count on {soft,}irq_exit. 819 */ 820void account_system_vtime(struct task_struct *curr) 821{ 822 unsigned long flags; 823 s64 delta; 824 int cpu; 825 826 if (!sched_clock_irqtime) 827 return; 828 829 local_irq_save(flags); 830 831 cpu = smp_processor_id(); 832 delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time); 833 __this_cpu_add(irq_start_time, delta); 834 835 irq_time_write_begin(); 836 /* 837 * We do not account for softirq time from ksoftirqd here. 838 * We want to continue accounting softirq time to ksoftirqd thread 839 * in that case, so as not to confuse scheduler with a special task 840 * that do not consume any time, but still wants to run. 841 */ 842 if (hardirq_count()) 843 __this_cpu_add(cpu_hardirq_time, delta); 844 else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) 845 __this_cpu_add(cpu_softirq_time, delta); 846 847 irq_time_write_end(); 848 local_irq_restore(flags); 849} 850EXPORT_SYMBOL_GPL(account_system_vtime); 851 852#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 853 854#ifdef CONFIG_PARAVIRT 855static inline u64 steal_ticks(u64 steal) 856{ 857 if (unlikely(steal > NSEC_PER_SEC)) 858 return div_u64(steal, TICK_NSEC); 859 860 return __iter_div_u64_rem(steal, TICK_NSEC, &steal); 861} 862#endif 863 864static void update_rq_clock_task(struct rq *rq, s64 delta) 865{ 866/* 867 * In theory, the compile should just see 0 here, and optimize out the call 868 * to sched_rt_avg_update. But I don't trust it... 869 */ 870#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) 871 s64 steal = 0, irq_delta = 0; 872#endif 873#ifdef CONFIG_IRQ_TIME_ACCOUNTING 874 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; 875 876 /* 877 * Since irq_time is only updated on {soft,}irq_exit, we might run into 878 * this case when a previous update_rq_clock() happened inside a 879 * {soft,}irq region. 880 * 881 * When this happens, we stop ->clock_task and only update the 882 * prev_irq_time stamp to account for the part that fit, so that a next 883 * update will consume the rest. This ensures ->clock_task is 884 * monotonic. 
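 *
 * For example: if only 3ms of rq->clock elapsed since the last update
 * but the irq time read back jumps by 5ms (the previous update ran
 * mid-irq), irq_delta is clamped to 3ms, ->clock_task does not advance
 * at all this round, and the remaining 2ms of irq time is charged
 * against the next update instead.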
885 * 886 * It does however cause some slight miss-attribution of {soft,}irq 887 * time, a more accurate solution would be to update the irq_time using 888 * the current rq->clock timestamp, except that would require using 889 * atomic ops. 890 */ 891 if (irq_delta > delta) 892 irq_delta = delta; 893 894 rq->prev_irq_time += irq_delta; 895 delta -= irq_delta; 896#endif 897#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING 898 if (static_key_false((¶virt_steal_rq_enabled))) { 899 u64 st; 900 901 steal = paravirt_steal_clock(cpu_of(rq)); 902 steal -= rq->prev_steal_time_rq; 903 904 if (unlikely(steal > delta)) 905 steal = delta; 906 907 st = steal_ticks(steal); 908 steal = st * TICK_NSEC; 909 910 rq->prev_steal_time_rq += steal; 911 912 delta -= steal; 913 } 914#endif 915 916 rq->clock_task += delta; 917 918#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) 919 if ((irq_delta + steal) && sched_feat(NONTASK_POWER)) 920 sched_rt_avg_update(rq, irq_delta + steal); 921#endif 922} 923 924#ifdef CONFIG_IRQ_TIME_ACCOUNTING 925static int irqtime_account_hi_update(void) 926{ 927 u64 *cpustat = kcpustat_this_cpu->cpustat; 928 unsigned long flags; 929 u64 latest_ns; 930 int ret = 0; 931 932 local_irq_save(flags); 933 latest_ns = this_cpu_read(cpu_hardirq_time); 934 if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ]) 935 ret = 1; 936 local_irq_restore(flags); 937 return ret; 938} 939 940static int irqtime_account_si_update(void) 941{ 942 u64 *cpustat = kcpustat_this_cpu->cpustat; 943 unsigned long flags; 944 u64 latest_ns; 945 int ret = 0; 946 947 local_irq_save(flags); 948 latest_ns = this_cpu_read(cpu_softirq_time); 949 if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ]) 950 ret = 1; 951 local_irq_restore(flags); 952 return ret; 953} 954 955#else /* CONFIG_IRQ_TIME_ACCOUNTING */ 956 957#define sched_clock_irqtime (0) 958 959#endif 960 961void sched_set_stop_task(int cpu, struct task_struct *stop) 962{ 963 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 964 struct task_struct *old_stop = cpu_rq(cpu)->stop; 965 966 if (stop) { 967 /* 968 * Make it appear like a SCHED_FIFO task, its something 969 * userspace knows about and won't get confused about. 970 * 971 * Also, it will make PI more or less work without too 972 * much confusion -- but then, stop work should not 973 * rely on PI working anyway. 974 */ 975 sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); 976 977 stop->sched_class = &stop_sched_class; 978 } 979 980 cpu_rq(cpu)->stop = stop; 981 982 if (old_stop) { 983 /* 984 * Reset it back to a normal scheduling class so that 985 * it can die in pieces. 986 */ 987 old_stop->sched_class = &rt_sched_class; 988 } 989} 990 991/* 992 * __normal_prio - return the priority that is based on the static prio 993 */ 994static inline int __normal_prio(struct task_struct *p) 995{ 996 return p->static_prio; 997} 998 999/* 1000 * Calculate the expected normal priority: i.e. priority 1001 * without taking RT-inheritance into account. Might be 1002 * boosted by interactivity modifiers. Changes upon fork, 1003 * setprio syscalls, and whenever the interactivity 1004 * estimator recalculates. 1005 */ 1006static inline int normal_prio(struct task_struct *p) 1007{ 1008 int prio; 1009 1010 if (task_has_rt_policy(p)) 1011 prio = MAX_RT_PRIO-1 - p->rt_priority; 1012 else 1013 prio = __normal_prio(p); 1014 return prio; 1015} 1016 1017/* 1018 * Calculate the current priority, i.e. the priority 1019 * taken into account by the scheduler. 
This value might 1020 * be boosted by RT tasks, or might be boosted by 1021 * interactivity modifiers. Will be RT if the task got 1022 * RT-boosted. If not then it returns p->normal_prio. 1023 */ 1024static int effective_prio(struct task_struct *p) 1025{ 1026 p->normal_prio = normal_prio(p); 1027 /* 1028 * If we are RT tasks or we were boosted to RT priority, 1029 * keep the priority unchanged. Otherwise, update priority 1030 * to the normal priority: 1031 */ 1032 if (!rt_prio(p->prio)) 1033 return p->normal_prio; 1034 return p->prio; 1035} 1036 1037/** 1038 * task_curr - is this task currently executing on a CPU? 1039 * @p: the task in question. 1040 */ 1041inline int task_curr(const struct task_struct *p) 1042{ 1043 return cpu_curr(task_cpu(p)) == p; 1044} 1045 1046static inline void check_class_changed(struct rq *rq, struct task_struct *p, 1047 const struct sched_class *prev_class, 1048 int oldprio) 1049{ 1050 if (prev_class != p->sched_class) { 1051 if (prev_class->switched_from) 1052 prev_class->switched_from(rq, p); 1053 p->sched_class->switched_to(rq, p); 1054 } else if (oldprio != p->prio) 1055 p->sched_class->prio_changed(rq, p, oldprio); 1056} 1057 1058void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) 1059{ 1060 const struct sched_class *class; 1061 1062 if (p->sched_class == rq->curr->sched_class) { 1063 rq->curr->sched_class->check_preempt_curr(rq, p, flags); 1064 } else { 1065 for_each_class(class) { 1066 if (class == rq->curr->sched_class) 1067 break; 1068 if (class == p->sched_class) { 1069 resched_task(rq->curr); 1070 break; 1071 } 1072 } 1073 } 1074 1075 /* 1076 * A queue event has occurred, and we're going to schedule. In 1077 * this case, we can save a useless back to back clock update. 1078 */ 1079 if (rq->curr->on_rq && test_tsk_need_resched(rq->curr)) 1080 rq->skip_clock_update = 1; 1081} 1082 1083#ifdef CONFIG_SMP 1084void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 1085{ 1086#ifdef CONFIG_SCHED_DEBUG 1087 /* 1088 * We should never call set_task_cpu() on a blocked task, 1089 * ttwu() will sort out the placement. 1090 */ 1091 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && 1092 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)); 1093 1094#ifdef CONFIG_LOCKDEP 1095 /* 1096 * The caller should hold either p->pi_lock or rq->lock, when changing 1097 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. 1098 * 1099 * sched_move_task() holds both and thus holding either pins the cgroup, 1100 * see set_task_rq(). 1101 * 1102 * Furthermore, all task_rq users should acquire both locks, see 1103 * task_rq_lock(). 1104 */ 1105 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || 1106 lockdep_is_held(&task_rq(p)->lock))); 1107#endif 1108#endif 1109 1110 trace_sched_migrate_task(p, new_cpu); 1111 1112 if (task_cpu(p) != new_cpu) { 1113 p->se.nr_migrations++; 1114 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0); 1115 } 1116 1117 __set_task_cpu(p, new_cpu); 1118} 1119 1120struct migration_arg { 1121 struct task_struct *task; 1122 int dest_cpu; 1123}; 1124 1125static int migration_cpu_stop(void *data); 1126 1127/* 1128 * wait_task_inactive - wait for a thread to unschedule. 1129 * 1130 * If @match_state is nonzero, it's the @p->state value just checked and 1131 * not expected to change. If it changes, i.e. @p might have woken up, 1132 * then return zero. When we succeed in waiting for @p to be off its CPU, 1133 * we return a positive number (its total switch count). 
If a second call 1134 * a short while later returns the same number, the caller can be sure that 1135 * @p has remained unscheduled the whole time. 1136 * 1137 * The caller must ensure that the task *will* unschedule sometime soon, 1138 * else this function might spin for a *long* time. This function can't 1139 * be called with interrupts off, or it may introduce deadlock with 1140 * smp_call_function() if an IPI is sent by the same process we are 1141 * waiting to become inactive. 1142 */ 1143unsigned long wait_task_inactive(struct task_struct *p, long match_state) 1144{ 1145 unsigned long flags; 1146 int running, on_rq; 1147 unsigned long ncsw; 1148 struct rq *rq; 1149 1150 for (;;) { 1151 /* 1152 * We do the initial early heuristics without holding 1153 * any task-queue locks at all. We'll only try to get 1154 * the runqueue lock when things look like they will 1155 * work out! 1156 */ 1157 rq = task_rq(p); 1158 1159 /* 1160 * If the task is actively running on another CPU 1161 * still, just relax and busy-wait without holding 1162 * any locks. 1163 * 1164 * NOTE! Since we don't hold any locks, it's not 1165 * even sure that "rq" stays as the right runqueue! 1166 * But we don't care, since "task_running()" will 1167 * return false if the runqueue has changed and p 1168 * is actually now running somewhere else! 1169 */ 1170 while (task_running(rq, p)) { 1171 if (match_state && unlikely(p->state != match_state)) 1172 return 0; 1173 cpu_relax(); 1174 } 1175 1176 /* 1177 * Ok, time to look more closely! We need the rq 1178 * lock now, to be *sure*. If we're wrong, we'll 1179 * just go back and repeat. 1180 */ 1181 rq = task_rq_lock(p, &flags); 1182 trace_sched_wait_task(p); 1183 running = task_running(rq, p); 1184 on_rq = p->on_rq; 1185 ncsw = 0; 1186 if (!match_state || p->state == match_state) 1187 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ 1188 task_rq_unlock(rq, p, &flags); 1189 1190 /* 1191 * If it changed from the expected state, bail out now. 1192 */ 1193 if (unlikely(!ncsw)) 1194 break; 1195 1196 /* 1197 * Was it really running after all now that we 1198 * checked with the proper locks actually held? 1199 * 1200 * Oops. Go back and try again.. 1201 */ 1202 if (unlikely(running)) { 1203 cpu_relax(); 1204 continue; 1205 } 1206 1207 /* 1208 * It's not enough that it's not actively running, 1209 * it must be off the runqueue _entirely_, and not 1210 * preempted! 1211 * 1212 * So if it was still runnable (but just not actively 1213 * running right now), it's preempted, and we should 1214 * yield - it could be a while. 1215 */ 1216 if (unlikely(on_rq)) { 1217 ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ); 1218 1219 set_current_state(TASK_UNINTERRUPTIBLE); 1220 schedule_hrtimeout(&to, HRTIMER_MODE_REL); 1221 continue; 1222 } 1223 1224 /* 1225 * Ahh, all good. It wasn't running, and it wasn't 1226 * runnable, which means that it will never become 1227 * running in the future either. We're all done! 1228 */ 1229 break; 1230 } 1231 1232 return ncsw; 1233} 1234 1235/*** 1236 * kick_process - kick a running thread to enter/exit the kernel 1237 * @p: the to-be-kicked thread 1238 * 1239 * Cause a process which is running on another CPU to enter 1240 * kernel-mode, without any delay. (to get signals handled.) 1241 * 1242 * NOTE: this function doesn't have to take the runqueue lock, 1243 * because all it wants to ensure is that the remote task enters 1244 * the kernel. 
If the IPI races and the task has been migrated 1245 * to another CPU then no harm is done and the purpose has been 1246 * achieved as well. 1247 */ 1248void kick_process(struct task_struct *p) 1249{ 1250 int cpu; 1251 1252 preempt_disable(); 1253 cpu = task_cpu(p); 1254 if ((cpu != smp_processor_id()) && task_curr(p)) 1255 smp_send_reschedule(cpu); 1256 preempt_enable(); 1257} 1258EXPORT_SYMBOL_GPL(kick_process); 1259#endif /* CONFIG_SMP */ 1260 1261#ifdef CONFIG_SMP 1262/* 1263 * ->cpus_allowed is protected by both rq->lock and p->pi_lock 1264 */ 1265static int select_fallback_rq(int cpu, struct task_struct *p) 1266{ 1267 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu)); 1268 enum { cpuset, possible, fail } state = cpuset; 1269 int dest_cpu; 1270 1271 /* Look for allowed, online CPU in same node. */ 1272 for_each_cpu(dest_cpu, nodemask) { 1273 if (!cpu_online(dest_cpu)) 1274 continue; 1275 if (!cpu_active(dest_cpu)) 1276 continue; 1277 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) 1278 return dest_cpu; 1279 } 1280 1281 for (;;) { 1282 /* Any allowed, online CPU? */ 1283 for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) { 1284 if (!cpu_online(dest_cpu)) 1285 continue; 1286 if (!cpu_active(dest_cpu)) 1287 continue; 1288 goto out; 1289 } 1290 1291 switch (state) { 1292 case cpuset: 1293 /* No more Mr. Nice Guy. */ 1294 cpuset_cpus_allowed_fallback(p); 1295 state = possible; 1296 break; 1297 1298 case possible: 1299 do_set_cpus_allowed(p, cpu_possible_mask); 1300 state = fail; 1301 break; 1302 1303 case fail: 1304 BUG(); 1305 break; 1306 } 1307 } 1308 1309out: 1310 if (state != cpuset) { 1311 /* 1312 * Don't tell them about moving exiting tasks or 1313 * kernel threads (both mm NULL), since they never 1314 * leave kernel. 1315 */ 1316 if (p->mm && printk_ratelimit()) { 1317 printk_sched("process %d (%s) no longer affine to cpu%d\n", 1318 task_pid_nr(p), p->comm, cpu); 1319 } 1320 } 1321 1322 return dest_cpu; 1323} 1324 1325/* 1326 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable. 1327 */ 1328static inline 1329int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) 1330{ 1331 int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags); 1332 1333 /* 1334 * In order not to call set_task_cpu() on a blocking task we need 1335 * to rely on ttwu() to place the task on a valid ->cpus_allowed 1336 * cpu. 1337 * 1338 * Since this is common to all placement strategies, this lives here. 
1339 * 1340 * [ this allows ->select_task() to simply return task_cpu(p) and 1341 * not worry about this generic constraint ] 1342 */ 1343 if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) || 1344 !cpu_online(cpu))) 1345 cpu = select_fallback_rq(task_cpu(p), p); 1346 1347 return cpu; 1348} 1349 1350static void update_avg(u64 *avg, u64 sample) 1351{ 1352 s64 diff = sample - *avg; 1353 *avg += diff >> 3; 1354} 1355#endif 1356 1357static void 1358ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 1359{ 1360#ifdef CONFIG_SCHEDSTATS 1361 struct rq *rq = this_rq(); 1362 1363#ifdef CONFIG_SMP 1364 int this_cpu = smp_processor_id(); 1365 1366 if (cpu == this_cpu) { 1367 schedstat_inc(rq, ttwu_local); 1368 schedstat_inc(p, se.statistics.nr_wakeups_local); 1369 } else { 1370 struct sched_domain *sd; 1371 1372 schedstat_inc(p, se.statistics.nr_wakeups_remote); 1373 rcu_read_lock(); 1374 for_each_domain(this_cpu, sd) { 1375 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 1376 schedstat_inc(sd, ttwu_wake_remote); 1377 break; 1378 } 1379 } 1380 rcu_read_unlock(); 1381 } 1382 1383 if (wake_flags & WF_MIGRATED) 1384 schedstat_inc(p, se.statistics.nr_wakeups_migrate); 1385 1386#endif /* CONFIG_SMP */ 1387 1388 schedstat_inc(rq, ttwu_count); 1389 schedstat_inc(p, se.statistics.nr_wakeups); 1390 1391 if (wake_flags & WF_SYNC) 1392 schedstat_inc(p, se.statistics.nr_wakeups_sync); 1393 1394#endif /* CONFIG_SCHEDSTATS */ 1395} 1396 1397static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) 1398{ 1399 activate_task(rq, p, en_flags); 1400 p->on_rq = 1; 1401 1402 /* if a worker is waking up, notify workqueue */ 1403 if (p->flags & PF_WQ_WORKER) 1404 wq_worker_waking_up(p, cpu_of(rq)); 1405} 1406 1407/* 1408 * Mark the task runnable and perform wakeup-preemption. 1409 */ 1410static void 1411ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) 1412{ 1413 trace_sched_wakeup(p, true); 1414 check_preempt_curr(rq, p, wake_flags); 1415 1416 p->state = TASK_RUNNING; 1417#ifdef CONFIG_SMP 1418 if (p->sched_class->task_woken) 1419 p->sched_class->task_woken(rq, p); 1420 1421 if (rq->idle_stamp) { 1422 u64 delta = rq->clock - rq->idle_stamp; 1423 u64 max = 2*sysctl_sched_migration_cost; 1424 1425 if (delta > max) 1426 rq->avg_idle = max; 1427 else 1428 update_avg(&rq->avg_idle, delta); 1429 rq->idle_stamp = 0; 1430 } 1431#endif 1432} 1433 1434static void 1435ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) 1436{ 1437#ifdef CONFIG_SMP 1438 if (p->sched_contributes_to_load) 1439 rq->nr_uninterruptible--; 1440#endif 1441 1442 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING); 1443 ttwu_do_wakeup(rq, p, wake_flags); 1444} 1445 1446/* 1447 * Called in case the task @p isn't fully descheduled from its runqueue, 1448 * in this case we must do a remote wakeup. Its a 'light' wakeup though, 1449 * since all we need to do is flip p->state to TASK_RUNNING, since 1450 * the task is still ->on_rq. 
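 *
 * This is the cheap path of try_to_wake_up(): a task that is still on
 * its runqueue (e.g. it merely got preempted) only needs its state
 * flipped back and a preemption check, no CPU selection and no
 * re-enqueue.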
1451 */ 1452static int ttwu_remote(struct task_struct *p, int wake_flags) 1453{ 1454 struct rq *rq; 1455 int ret = 0; 1456 1457 rq = __task_rq_lock(p); 1458 if (p->on_rq) { 1459 ttwu_do_wakeup(rq, p, wake_flags); 1460 ret = 1; 1461 } 1462 __task_rq_unlock(rq); 1463 1464 return ret; 1465} 1466 1467#ifdef CONFIG_SMP 1468static void sched_ttwu_pending(void) 1469{ 1470 struct rq *rq = this_rq(); 1471 struct llist_node *llist = llist_del_all(&rq->wake_list); 1472 struct task_struct *p; 1473 1474 raw_spin_lock(&rq->lock); 1475 1476 while (llist) { 1477 p = llist_entry(llist, struct task_struct, wake_entry); 1478 llist = llist_next(llist); 1479 ttwu_do_activate(rq, p, 0); 1480 } 1481 1482 raw_spin_unlock(&rq->lock); 1483} 1484 1485void scheduler_ipi(void) 1486{ 1487 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()) 1488 return; 1489 1490 /* 1491 * Not all reschedule IPI handlers call irq_enter/irq_exit, since 1492 * traditionally all their work was done from the interrupt return 1493 * path. Now that we actually do some work, we need to make sure 1494 * we do call them. 1495 * 1496 * Some archs already do call them, luckily irq_enter/exit nest 1497 * properly. 1498 * 1499 * Arguably we should visit all archs and update all handlers, 1500 * however a fair share of IPIs are still resched only so this would 1501 * somewhat pessimize the simple resched case. 1502 */ 1503 irq_enter(); 1504 sched_ttwu_pending(); 1505 1506 /* 1507 * Check if someone kicked us for doing the nohz idle load balance. 1508 */ 1509 if (unlikely(got_nohz_idle_kick() && !need_resched())) { 1510 this_rq()->idle_balance = 1; 1511 raise_softirq_irqoff(SCHED_SOFTIRQ); 1512 } 1513 irq_exit(); 1514} 1515 1516static void ttwu_queue_remote(struct task_struct *p, int cpu) 1517{ 1518 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) 1519 smp_send_reschedule(cpu); 1520} 1521 1522#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 1523static int ttwu_activate_remote(struct task_struct *p, int wake_flags) 1524{ 1525 struct rq *rq; 1526 int ret = 0; 1527 1528 rq = __task_rq_lock(p); 1529 if (p->on_cpu) { 1530 ttwu_activate(rq, p, ENQUEUE_WAKEUP); 1531 ttwu_do_wakeup(rq, p, wake_flags); 1532 ret = 1; 1533 } 1534 __task_rq_unlock(rq); 1535 1536 return ret; 1537 1538} 1539#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ 1540 1541bool cpus_share_cache(int this_cpu, int that_cpu) 1542{ 1543 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); 1544} 1545#endif /* CONFIG_SMP */ 1546 1547static void ttwu_queue(struct task_struct *p, int cpu) 1548{ 1549 struct rq *rq = cpu_rq(cpu); 1550 1551#if defined(CONFIG_SMP) 1552 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) { 1553 sched_clock_cpu(cpu); /* sync clocks x-cpu */ 1554 ttwu_queue_remote(p, cpu); 1555 return; 1556 } 1557#endif 1558 1559 raw_spin_lock(&rq->lock); 1560 ttwu_do_activate(rq, p, 0); 1561 raw_spin_unlock(&rq->lock); 1562} 1563 1564/** 1565 * try_to_wake_up - wake up a thread 1566 * @p: the thread to be awakened 1567 * @state: the mask of task states that can be woken 1568 * @wake_flags: wake modifier flags (WF_*) 1569 * 1570 * Put it on the run-queue if it's not already there. The "current" 1571 * thread is always on the run-queue (except when the actual 1572 * re-schedule is in progress), and as such you're allowed to do 1573 * the simpler "current->state = TASK_RUNNING" to mark yourself 1574 * runnable without the overhead of this. 
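 *
 * A typical sleeper built on top of this looks roughly like:
 *
 *   set_current_state(TASK_INTERRUPTIBLE);
 *   if (!condition)
 *           schedule();
 *   __set_current_state(TASK_RUNNING);
 *
 * while the waker sets @condition and then calls wake_up_process(); the
 * @state mask passed in here is what keeps such a wakeup from disturbing
 * a task that has meanwhile gone to sleep for some unrelated reason.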
1575 * 1576 * Returns %true if @p was woken up, %false if it was already running 1577 * or @state didn't match @p's state. 1578 */ 1579static int 1580try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) 1581{ 1582 unsigned long flags; 1583 int cpu, success = 0; 1584 1585 smp_wmb(); 1586 raw_spin_lock_irqsave(&p->pi_lock, flags); 1587 if (!(p->state & state)) 1588 goto out; 1589 1590 success = 1; /* we're going to change ->state */ 1591 cpu = task_cpu(p); 1592 1593 if (p->on_rq && ttwu_remote(p, wake_flags)) 1594 goto stat; 1595 1596#ifdef CONFIG_SMP 1597 /* 1598 * If the owning (remote) cpu is still in the middle of schedule() with 1599 * this task as prev, wait until its done referencing the task. 1600 */ 1601 while (p->on_cpu) { 1602#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 1603 /* 1604 * In case the architecture enables interrupts in 1605 * context_switch(), we cannot busy wait, since that 1606 * would lead to deadlocks when an interrupt hits and 1607 * tries to wake up @prev. So bail and do a complete 1608 * remote wakeup. 1609 */ 1610 if (ttwu_activate_remote(p, wake_flags)) 1611 goto stat; 1612#else 1613 cpu_relax(); 1614#endif 1615 } 1616 /* 1617 * Pairs with the smp_wmb() in finish_lock_switch(). 1618 */ 1619 smp_rmb(); 1620 1621 p->sched_contributes_to_load = !!task_contributes_to_load(p); 1622 p->state = TASK_WAKING; 1623 1624 if (p->sched_class->task_waking) 1625 p->sched_class->task_waking(p); 1626 1627 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags); 1628 if (task_cpu(p) != cpu) { 1629 wake_flags |= WF_MIGRATED; 1630 set_task_cpu(p, cpu); 1631 } 1632#endif /* CONFIG_SMP */ 1633 1634 ttwu_queue(p, cpu); 1635stat: 1636 ttwu_stat(p, cpu, wake_flags); 1637out: 1638 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 1639 1640 return success; 1641} 1642 1643/** 1644 * try_to_wake_up_local - try to wake up a local task with rq lock held 1645 * @p: the thread to be awakened 1646 * 1647 * Put @p on the run-queue if it's not already there. The caller must 1648 * ensure that this_rq() is locked, @p is bound to this_rq() and not 1649 * the current task. 1650 */ 1651static void try_to_wake_up_local(struct task_struct *p) 1652{ 1653 struct rq *rq = task_rq(p); 1654 1655 BUG_ON(rq != this_rq()); 1656 BUG_ON(p == current); 1657 lockdep_assert_held(&rq->lock); 1658 1659 if (!raw_spin_trylock(&p->pi_lock)) { 1660 raw_spin_unlock(&rq->lock); 1661 raw_spin_lock(&p->pi_lock); 1662 raw_spin_lock(&rq->lock); 1663 } 1664 1665 if (!(p->state & TASK_NORMAL)) 1666 goto out; 1667 1668 if (!p->on_rq) 1669 ttwu_activate(rq, p, ENQUEUE_WAKEUP); 1670 1671 ttwu_do_wakeup(rq, p, 0); 1672 ttwu_stat(p, smp_processor_id(), 0); 1673out: 1674 raw_spin_unlock(&p->pi_lock); 1675} 1676 1677/** 1678 * wake_up_process - Wake up a specific process 1679 * @p: The process to be woken up. 1680 * 1681 * Attempt to wake up the nominated process and move it to the set of runnable 1682 * processes. Returns 1 if the process was woken up, 0 if it was already 1683 * running. 1684 * 1685 * It may be assumed that this function implies a write memory barrier before 1686 * changing the task state if and only if any tasks are woken up. 1687 */ 1688int wake_up_process(struct task_struct *p) 1689{ 1690 return try_to_wake_up(p, TASK_ALL, 0); 1691} 1692EXPORT_SYMBOL(wake_up_process); 1693 1694int wake_up_state(struct task_struct *p, unsigned int state) 1695{ 1696 return try_to_wake_up(p, state, 0); 1697} 1698 1699/* 1700 * Perform scheduler related setup for a newly forked process p. 1701 * p is forked by current. 
1702 * 1703 * __sched_fork() is basic setup used by init_idle() too: 1704 */ 1705static void __sched_fork(struct task_struct *p) 1706{ 1707 p->on_rq = 0; 1708 1709 p->se.on_rq = 0; 1710 p->se.exec_start = 0; 1711 p->se.sum_exec_runtime = 0; 1712 p->se.prev_sum_exec_runtime = 0; 1713 p->se.nr_migrations = 0; 1714 p->se.vruntime = 0; 1715 INIT_LIST_HEAD(&p->se.group_node); 1716 1717#ifdef CONFIG_SCHEDSTATS 1718 memset(&p->se.statistics, 0, sizeof(p->se.statistics)); 1719#endif 1720 1721 INIT_LIST_HEAD(&p->rt.run_list); 1722 1723#ifdef CONFIG_PREEMPT_NOTIFIERS 1724 INIT_HLIST_HEAD(&p->preempt_notifiers); 1725#endif 1726} 1727 1728/* 1729 * fork()/clone()-time setup: 1730 */ 1731void sched_fork(struct task_struct *p) 1732{ 1733 unsigned long flags; 1734 int cpu = get_cpu(); 1735 1736 __sched_fork(p); 1737 /* 1738 * We mark the process as running here. This guarantees that 1739 * nobody will actually run it, and a signal or other external 1740 * event cannot wake it up and insert it on the runqueue either. 1741 */ 1742 p->state = TASK_RUNNING; 1743 1744 /* 1745 * Make sure we do not leak PI boosting priority to the child. 1746 */ 1747 p->prio = current->normal_prio; 1748 1749 /* 1750 * Revert to default priority/policy on fork if requested. 1751 */ 1752 if (unlikely(p->sched_reset_on_fork)) { 1753 if (task_has_rt_policy(p)) { 1754 p->policy = SCHED_NORMAL; 1755 p->static_prio = NICE_TO_PRIO(0); 1756 p->rt_priority = 0; 1757 } else if (PRIO_TO_NICE(p->static_prio) < 0) 1758 p->static_prio = NICE_TO_PRIO(0); 1759 1760 p->prio = p->normal_prio = __normal_prio(p); 1761 set_load_weight(p); 1762 1763 /* 1764 * We don't need the reset flag anymore after the fork. It has 1765 * fulfilled its duty: 1766 */ 1767 p->sched_reset_on_fork = 0; 1768 } 1769 1770 if (!rt_prio(p->prio)) 1771 p->sched_class = &fair_sched_class; 1772 1773 if (p->sched_class->task_fork) 1774 p->sched_class->task_fork(p); 1775 1776 /* 1777 * The child is not yet in the pid-hash so no cgroup attach races, 1778 * and the cgroup is pinned to this child due to cgroup_fork() 1779 * is ran before sched_fork(). 1780 * 1781 * Silence PROVE_RCU. 1782 */ 1783 raw_spin_lock_irqsave(&p->pi_lock, flags); 1784 set_task_cpu(p, cpu); 1785 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 1786 1787#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 1788 if (likely(sched_info_on())) 1789 memset(&p->sched_info, 0, sizeof(p->sched_info)); 1790#endif 1791#if defined(CONFIG_SMP) 1792 p->on_cpu = 0; 1793#endif 1794#ifdef CONFIG_PREEMPT_COUNT 1795 /* Want to start with kernel preemption disabled. */ 1796 task_thread_info(p)->preempt_count = 1; 1797#endif 1798#ifdef CONFIG_SMP 1799 plist_node_init(&p->pushable_tasks, MAX_PRIO); 1800#endif 1801 1802 put_cpu(); 1803} 1804 1805/* 1806 * wake_up_new_task - wake up a newly created task for the first time. 1807 * 1808 * This function will do some initial scheduler statistics housekeeping 1809 * that must be done for every newly created context, then puts the task 1810 * on the runqueue and wakes it. 
1811 */ 1812void wake_up_new_task(struct task_struct *p) 1813{ 1814 unsigned long flags; 1815 struct rq *rq; 1816 1817 raw_spin_lock_irqsave(&p->pi_lock, flags); 1818#ifdef CONFIG_SMP 1819 /* 1820 * Fork balancing, do it here and not earlier because: 1821 * - cpus_allowed can change in the fork path 1822 * - any previously selected cpu might disappear through hotplug 1823 */ 1824 set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0)); 1825#endif 1826 1827 rq = __task_rq_lock(p); 1828 activate_task(rq, p, 0); 1829 p->on_rq = 1; 1830 trace_sched_wakeup_new(p, true); 1831 check_preempt_curr(rq, p, WF_FORK); 1832#ifdef CONFIG_SMP 1833 if (p->sched_class->task_woken) 1834 p->sched_class->task_woken(rq, p); 1835#endif 1836 task_rq_unlock(rq, p, &flags); 1837} 1838 1839#ifdef CONFIG_PREEMPT_NOTIFIERS 1840 1841/** 1842 * preempt_notifier_register - tell me when current is being preempted & rescheduled 1843 * @notifier: notifier struct to register 1844 */ 1845void preempt_notifier_register(struct preempt_notifier *notifier) 1846{ 1847 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); 1848} 1849EXPORT_SYMBOL_GPL(preempt_notifier_register); 1850 1851/** 1852 * preempt_notifier_unregister - no longer interested in preemption notifications 1853 * @notifier: notifier struct to unregister 1854 * 1855 * This is safe to call from within a preemption notifier. 1856 */ 1857void preempt_notifier_unregister(struct preempt_notifier *notifier) 1858{ 1859 hlist_del(¬ifier->link); 1860} 1861EXPORT_SYMBOL_GPL(preempt_notifier_unregister); 1862 1863static void fire_sched_in_preempt_notifiers(struct task_struct *curr) 1864{ 1865 struct preempt_notifier *notifier; 1866 struct hlist_node *node; 1867 1868 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) 1869 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 1870} 1871 1872static void 1873fire_sched_out_preempt_notifiers(struct task_struct *curr, 1874 struct task_struct *next) 1875{ 1876 struct preempt_notifier *notifier; 1877 struct hlist_node *node; 1878 1879 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) 1880 notifier->ops->sched_out(notifier, next); 1881} 1882 1883#else /* !CONFIG_PREEMPT_NOTIFIERS */ 1884 1885static void fire_sched_in_preempt_notifiers(struct task_struct *curr) 1886{ 1887} 1888 1889static void 1890fire_sched_out_preempt_notifiers(struct task_struct *curr, 1891 struct task_struct *next) 1892{ 1893} 1894 1895#endif /* CONFIG_PREEMPT_NOTIFIERS */ 1896 1897/** 1898 * prepare_task_switch - prepare to switch tasks 1899 * @rq: the runqueue preparing to switch 1900 * @prev: the current task that is being switched out 1901 * @next: the task we are going to switch to. 1902 * 1903 * This is called with the rq lock held and interrupts off. It must 1904 * be paired with a subsequent finish_task_switch after the context 1905 * switch. 1906 * 1907 * prepare_task_switch sets up locking and calls architecture specific 1908 * hooks. 1909 */ 1910static inline void 1911prepare_task_switch(struct rq *rq, struct task_struct *prev, 1912 struct task_struct *next) 1913{ 1914 sched_info_switch(prev, next); 1915 perf_event_task_sched_out(prev, next); 1916 fire_sched_out_preempt_notifiers(prev, next); 1917 prepare_lock_switch(rq, next); 1918 prepare_arch_switch(next); 1919 trace_sched_switch(prev, next); 1920} 1921 1922/** 1923 * finish_task_switch - clean up after a task-switch 1924 * @rq: runqueue associated with task-switch 1925 * @prev: the thread we just switched away from. 
1926 * 1927 * finish_task_switch must be called after the context switch, paired 1928 * with a prepare_task_switch call before the context switch. 1929 * finish_task_switch will reconcile locking set up by prepare_task_switch, 1930 * and do any other architecture-specific cleanup actions. 1931 * 1932 * Note that we may have delayed dropping an mm in context_switch(). If 1933 * so, we finish that here outside of the runqueue lock. (Doing it 1934 * with the lock held can cause deadlocks; see schedule() for 1935 * details.) 1936 */ 1937static void finish_task_switch(struct rq *rq, struct task_struct *prev) 1938 __releases(rq->lock) 1939{ 1940 struct mm_struct *mm = rq->prev_mm; 1941 long prev_state; 1942 1943 rq->prev_mm = NULL; 1944 1945 /* 1946 * A task struct has one reference for the use as "current". 1947 * If a task dies, then it sets TASK_DEAD in tsk->state and calls 1948 * schedule one last time. The schedule call will never return, and 1949 * the scheduled task must drop that reference. 1950 * The test for TASK_DEAD must occur while the runqueue locks are 1951 * still held, otherwise prev could be scheduled on another cpu, die 1952 * there before we look at prev->state, and then the reference would 1953 * be dropped twice. 1954 * Manfred Spraul <manfred@colorfullife.com> 1955 */ 1956 prev_state = prev->state; 1957 finish_arch_switch(prev); 1958#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 1959 local_irq_disable(); 1960#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ 1961 perf_event_task_sched_in(prev, current); 1962#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 1963 local_irq_enable(); 1964#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ 1965 finish_lock_switch(rq, prev); 1966 finish_arch_post_lock_switch(); 1967 1968 fire_sched_in_preempt_notifiers(current); 1969 if (mm) 1970 mmdrop(mm); 1971 if (unlikely(prev_state == TASK_DEAD)) { 1972 /* 1973 * Remove function-return probe instances associated with this 1974 * task and put them back on the free list. 1975 */ 1976 kprobe_flush_task(prev); 1977 put_task_struct(prev); 1978 } 1979} 1980 1981#ifdef CONFIG_SMP 1982 1983/* assumes rq->lock is held */ 1984static inline void pre_schedule(struct rq *rq, struct task_struct *prev) 1985{ 1986 if (prev->sched_class->pre_schedule) 1987 prev->sched_class->pre_schedule(rq, prev); 1988} 1989 1990/* rq->lock is NOT held, but preemption is disabled */ 1991static inline void post_schedule(struct rq *rq) 1992{ 1993 if (rq->post_schedule) { 1994 unsigned long flags; 1995 1996 raw_spin_lock_irqsave(&rq->lock, flags); 1997 if (rq->curr->sched_class->post_schedule) 1998 rq->curr->sched_class->post_schedule(rq); 1999 raw_spin_unlock_irqrestore(&rq->lock, flags); 2000 2001 rq->post_schedule = 0; 2002 } 2003} 2004 2005#else 2006 2007static inline void pre_schedule(struct rq *rq, struct task_struct *p) 2008{ 2009} 2010 2011static inline void post_schedule(struct rq *rq) 2012{ 2013} 2014 2015#endif 2016 2017/** 2018 * schedule_tail - first thing a freshly forked thread must call. 2019 * @prev: the thread we just switched away from. 2020 */ 2021asmlinkage void schedule_tail(struct task_struct *prev) 2022 __releases(rq->lock) 2023{ 2024 struct rq *rq = this_rq(); 2025 2026 finish_task_switch(rq, prev); 2027 2028 /* 2029 * FIXME: do we need to worry about rq being invalidated by the 2030 * task_switch? 
2031 */ 2032 post_schedule(rq); 2033 2034#ifdef __ARCH_WANT_UNLOCKED_CTXSW 2035 /* In this case, finish_task_switch does not reenable preemption */ 2036 preempt_enable(); 2037#endif 2038 if (current->set_child_tid) 2039 put_user(task_pid_vnr(current), current->set_child_tid); 2040} 2041 2042/* 2043 * context_switch - switch to the new MM and the new 2044 * thread's register state. 2045 */ 2046static inline void 2047context_switch(struct rq *rq, struct task_struct *prev, 2048 struct task_struct *next) 2049{ 2050 struct mm_struct *mm, *oldmm; 2051 2052 prepare_task_switch(rq, prev, next); 2053 2054 mm = next->mm; 2055 oldmm = prev->active_mm; 2056 /* 2057 * For paravirt, this is coupled with an exit in switch_to to 2058 * combine the page table reload and the switch backend into 2059 * one hypercall. 2060 */ 2061 arch_start_context_switch(prev); 2062 2063 if (!mm) { 2064 next->active_mm = oldmm; 2065 atomic_inc(&oldmm->mm_count); 2066 enter_lazy_tlb(oldmm, next); 2067 } else 2068 switch_mm(oldmm, mm, next); 2069 2070 if (!prev->mm) { 2071 prev->active_mm = NULL; 2072 rq->prev_mm = oldmm; 2073 } 2074 /* 2075 * Since the runqueue lock will be released by the next 2076 * task (which is an invalid locking op but in the case 2077 * of the scheduler it's an obvious special-case), so we 2078 * do an early lockdep release here: 2079 */ 2080#ifndef __ARCH_WANT_UNLOCKED_CTXSW 2081 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 2082#endif 2083 2084 /* Here we just switch the register state and the stack. */ 2085 rcu_switch_from(prev); 2086 switch_to(prev, next, prev); 2087 2088 barrier(); 2089 /* 2090 * this_rq must be evaluated again because prev may have moved 2091 * CPUs since it called schedule(), thus the 'rq' on its stack 2092 * frame will be invalid. 2093 */ 2094 finish_task_switch(this_rq(), prev); 2095} 2096 2097/* 2098 * nr_running, nr_uninterruptible and nr_context_switches: 2099 * 2100 * externally visible scheduler statistics: current number of runnable 2101 * threads, current number of uninterruptible-sleeping threads, total 2102 * number of context switches performed since bootup. 2103 */ 2104unsigned long nr_running(void) 2105{ 2106 unsigned long i, sum = 0; 2107 2108 for_each_online_cpu(i) 2109 sum += cpu_rq(i)->nr_running; 2110 2111 return sum; 2112} 2113 2114unsigned long nr_uninterruptible(void) 2115{ 2116 unsigned long i, sum = 0; 2117 2118 for_each_possible_cpu(i) 2119 sum += cpu_rq(i)->nr_uninterruptible; 2120 2121 /* 2122 * Since we read the counters lockless, it might be slightly 2123 * inaccurate. 
Do not allow it to go below zero though: 2124 */ 2125 if (unlikely((long)sum < 0)) 2126 sum = 0; 2127 2128 return sum; 2129} 2130 2131unsigned long long nr_context_switches(void) 2132{ 2133 int i; 2134 unsigned long long sum = 0; 2135 2136 for_each_possible_cpu(i) 2137 sum += cpu_rq(i)->nr_switches; 2138 2139 return sum; 2140} 2141 2142unsigned long nr_iowait(void) 2143{ 2144 unsigned long i, sum = 0; 2145 2146 for_each_possible_cpu(i) 2147 sum += atomic_read(&cpu_rq(i)->nr_iowait); 2148 2149 return sum; 2150} 2151 2152unsigned long nr_iowait_cpu(int cpu) 2153{ 2154 struct rq *this = cpu_rq(cpu); 2155 return atomic_read(&this->nr_iowait); 2156} 2157 2158unsigned long this_cpu_load(void) 2159{ 2160 struct rq *this = this_rq(); 2161 return this->cpu_load[0]; 2162} 2163 2164 2165/* Variables and functions for calc_load */ 2166static atomic_long_t calc_load_tasks; 2167static unsigned long calc_load_update; 2168unsigned long avenrun[3]; 2169EXPORT_SYMBOL(avenrun); 2170 2171static long calc_load_fold_active(struct rq *this_rq) 2172{ 2173 long nr_active, delta = 0; 2174 2175 nr_active = this_rq->nr_running; 2176 nr_active += (long) this_rq->nr_uninterruptible; 2177 2178 if (nr_active != this_rq->calc_load_active) { 2179 delta = nr_active - this_rq->calc_load_active; 2180 this_rq->calc_load_active = nr_active; 2181 } 2182 2183 return delta; 2184} 2185 2186static unsigned long 2187calc_load(unsigned long load, unsigned long exp, unsigned long active) 2188{ 2189 load *= exp; 2190 load += active * (FIXED_1 - exp); 2191 load += 1UL << (FSHIFT - 1); 2192 return load >> FSHIFT; 2193} 2194 2195#ifdef CONFIG_NO_HZ 2196/* 2197 * For NO_HZ we delay the active fold to the next LOAD_FREQ update. 2198 * 2199 * When making the ILB scale, we should try to pull this in as well. 2200 */ 2201static atomic_long_t calc_load_tasks_idle; 2202 2203void calc_load_account_idle(struct rq *this_rq) 2204{ 2205 long delta; 2206 2207 delta = calc_load_fold_active(this_rq); 2208 if (delta) 2209 atomic_long_add(delta, &calc_load_tasks_idle); 2210} 2211 2212static long calc_load_fold_idle(void) 2213{ 2214 long delta = 0; 2215 2216 /* 2217 * Its got a race, we don't care... 2218 */ 2219 if (atomic_long_read(&calc_load_tasks_idle)) 2220 delta = atomic_long_xchg(&calc_load_tasks_idle, 0); 2221 2222 return delta; 2223} 2224 2225/** 2226 * fixed_power_int - compute: x^n, in O(log n) time 2227 * 2228 * @x: base of the power 2229 * @frac_bits: fractional bits of @x 2230 * @n: power to raise @x to. 2231 * 2232 * By exploiting the relation between the definition of the natural power 2233 * function: x^n := x*x*...*x (x multiplied by itself for n times), and 2234 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i, 2235 * (where: n_i \elem {0, 1}, the binary vector representing n), 2236 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is 2237 * of course trivially computable in O(log_2 n), the length of our binary 2238 * vector. 
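 *
 * For example, n = 13 = 1101b gives x^13 = x^8 * x^4 * x^1: the loop
 * below squares x three times and folds it into the result once for
 * each of the three set bits, instead of doing twelve plain
 * multiplications.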
2239 */ 2240static unsigned long 2241fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n) 2242{ 2243 unsigned long result = 1UL << frac_bits; 2244 2245 if (n) for (;;) { 2246 if (n & 1) { 2247 result *= x; 2248 result += 1UL << (frac_bits - 1); 2249 result >>= frac_bits; 2250 } 2251 n >>= 1; 2252 if (!n) 2253 break; 2254 x *= x; 2255 x += 1UL << (frac_bits - 1); 2256 x >>= frac_bits; 2257 } 2258 2259 return result; 2260} 2261 2262/* 2263 * a1 = a0 * e + a * (1 - e) 2264 * 2265 * a2 = a1 * e + a * (1 - e) 2266 * = (a0 * e + a * (1 - e)) * e + a * (1 - e) 2267 * = a0 * e^2 + a * (1 - e) * (1 + e) 2268 * 2269 * a3 = a2 * e + a * (1 - e) 2270 * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e) 2271 * = a0 * e^3 + a * (1 - e) * (1 + e + e^2) 2272 * 2273 * ... 2274 * 2275 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1] 2276 * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e) 2277 * = a0 * e^n + a * (1 - e^n) 2278 * 2279 * [1] application of the geometric series: 2280 * 2281 * n 1 - x^(n+1) 2282 * S_n := \Sum x^i = ------------- 2283 * i=0 1 - x 2284 */ 2285static unsigned long 2286calc_load_n(unsigned long load, unsigned long exp, 2287 unsigned long active, unsigned int n) 2288{ 2289 2290 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active); 2291} 2292 2293/* 2294 * NO_HZ can leave us missing all per-cpu ticks calling 2295 * calc_load_account_active(), but since an idle CPU folds its delta into 2296 * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold 2297 * in the pending idle delta if our idle period crossed a load cycle boundary. 2298 * 2299 * Once we've updated the global active value, we need to apply the exponential 2300 * weights adjusted to the number of cycles missed. 2301 */ 2302static void calc_global_nohz(void) 2303{ 2304 long delta, active, n; 2305 2306 /* 2307 * If we crossed a calc_load_update boundary, make sure to fold 2308 * any pending idle changes, the respective CPUs might have 2309 * missed the tick driven calc_load_account_active() update 2310 * due to NO_HZ. 2311 */ 2312 delta = calc_load_fold_idle(); 2313 if (delta) 2314 atomic_long_add(delta, &calc_load_tasks); 2315 2316 /* 2317 * It could be the one fold was all it took, we done! 2318 */ 2319 if (time_before(jiffies, calc_load_update + 10)) 2320 return; 2321 2322 /* 2323 * Catch-up, fold however many we are behind still 2324 */ 2325 delta = jiffies - calc_load_update - 10; 2326 n = 1 + (delta / LOAD_FREQ); 2327 2328 active = atomic_long_read(&calc_load_tasks); 2329 active = active > 0 ? active * FIXED_1 : 0; 2330 2331 avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n); 2332 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n); 2333 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n); 2334 2335 calc_load_update += n * LOAD_FREQ; 2336} 2337#else 2338void calc_load_account_idle(struct rq *this_rq) 2339{ 2340} 2341 2342static inline long calc_load_fold_idle(void) 2343{ 2344 return 0; 2345} 2346 2347static void calc_global_nohz(void) 2348{ 2349} 2350#endif 2351 2352/** 2353 * get_avenrun - get the load average array 2354 * @loads: pointer to dest load array 2355 * @offset: offset to add 2356 * @shift: shift count to shift the result left 2357 * 2358 * These values are estimates at best, so no need for locking. 
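 *
 * A consumer such as /proc/loadavg converts the fixed-point values back
 * to the familiar decimal form roughly like this (illustrative sketch):
 *
 *	unsigned long avnrun[3];
 *
 *	get_avenrun(avnrun, FIXED_1/200, 0);
 *	seq_printf(m, "%lu.%02lu ...", LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]));
 *
 * where LOAD_INT()/LOAD_FRAC() undo the FSHIFT fixed-point scaling and
 * the FIXED_1/200 offset rounds the result to two decimal places.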
2359 */ 2360void get_avenrun(unsigned long *loads, unsigned long offset, int shift) 2361{ 2362 loads[0] = (avenrun[0] + offset) << shift; 2363 loads[1] = (avenrun[1] + offset) << shift; 2364 loads[2] = (avenrun[2] + offset) << shift; 2365} 2366 2367/* 2368 * calc_load - update the avenrun load estimates 10 ticks after the 2369 * CPUs have updated calc_load_tasks. 2370 */ 2371void calc_global_load(unsigned long ticks) 2372{ 2373 long active; 2374 2375 if (time_before(jiffies, calc_load_update + 10)) 2376 return; 2377 2378 active = atomic_long_read(&calc_load_tasks); 2379 active = active > 0 ? active * FIXED_1 : 0; 2380 2381 avenrun[0] = calc_load(avenrun[0], EXP_1, active); 2382 avenrun[1] = calc_load(avenrun[1], EXP_5, active); 2383 avenrun[2] = calc_load(avenrun[2], EXP_15, active); 2384 2385 calc_load_update += LOAD_FREQ; 2386 2387 /* 2388 * Account one period with whatever state we found before 2389 * folding in the nohz state and ageing the entire idle period. 2390 * 2391 * This avoids loosing a sample when we go idle between 2392 * calc_load_account_active() (10 ticks ago) and now and thus 2393 * under-accounting. 2394 */ 2395 calc_global_nohz(); 2396} 2397 2398/* 2399 * Called from update_cpu_load() to periodically update this CPU's 2400 * active count. 2401 */ 2402static void calc_load_account_active(struct rq *this_rq) 2403{ 2404 long delta; 2405 2406 if (time_before(jiffies, this_rq->calc_load_update)) 2407 return; 2408 2409 delta = calc_load_fold_active(this_rq); 2410 delta += calc_load_fold_idle(); 2411 if (delta) 2412 atomic_long_add(delta, &calc_load_tasks); 2413 2414 this_rq->calc_load_update += LOAD_FREQ; 2415} 2416 2417/* 2418 * The exact cpuload at various idx values, calculated at every tick would be 2419 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load 2420 * 2421 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called 2422 * on nth tick when cpu may be busy, then we have: 2423 * load = ((2^idx - 1) / 2^idx)^(n-1) * load 2424 * load = (2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load 2425 * 2426 * decay_load_missed() below does efficient calculation of 2427 * load = ((2^idx - 1) / 2^idx)^(n-1) * load 2428 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load 2429 * 2430 * The calculation is approximated on a 128 point scale. 2431 * degrade_zero_ticks is the number of ticks after which load at any 2432 * particular idx is approximated to be zero. 2433 * degrade_factor is a precomputed table, a row for each load idx. 2434 * Each column corresponds to degradation factor for a power of two ticks, 2435 * based on 128 point scale. 2436 * Example: 2437 * row 2, col 3 (=12) says that the degradation at load idx 2 after 2438 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8). 2439 * 2440 * With this power of 2 load factors, we can degrade the load n times 2441 * by looking at 1 bits in n and doing as many mult/shift instead of 2442 * n mult/shifts needed by the exact degradation. 2443 */ 2444#define DEGRADE_SHIFT 7 2445static const unsigned char 2446 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128}; 2447static const unsigned char 2448 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = { 2449 {0, 0, 0, 0, 0, 0, 0, 0}, 2450 {64, 32, 8, 0, 0, 0, 0, 0}, 2451 {96, 72, 40, 12, 1, 0, 0}, 2452 {112, 98, 75, 43, 15, 1, 0}, 2453 {120, 112, 98, 76, 45, 16, 2} }; 2454 2455/* 2456 * Update cpu_load for any missed ticks, due to tickless idle. 
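 * For example, missed_updates = 5 (binary 101) at idx 2 applies columns
 * 0 and 2 of degrade_factor[2]: load * 96/128 * 40/128, i.e. roughly the
 * exact (3/4)^5 decay, using two multiply+shift steps instead of five.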
The backlog 2457 * would be when CPU is idle and so we just decay the old load without 2458 * adding any new load. 2459 */ 2460static unsigned long 2461decay_load_missed(unsigned long load, unsigned long missed_updates, int idx) 2462{ 2463 int j = 0; 2464 2465 if (!missed_updates) 2466 return load; 2467 2468 if (missed_updates >= degrade_zero_ticks[idx]) 2469 return 0; 2470 2471 if (idx == 1) 2472 return load >> missed_updates; 2473 2474 while (missed_updates) { 2475 if (missed_updates % 2) 2476 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT; 2477 2478 missed_updates >>= 1; 2479 j++; 2480 } 2481 return load; 2482} 2483 2484/* 2485 * Update rq->cpu_load[] statistics. This function is usually called every 2486 * scheduler tick (TICK_NSEC). With tickless idle this will not be called 2487 * every tick. We fix it up based on jiffies. 2488 */ 2489static void __update_cpu_load(struct rq *this_rq, unsigned long this_load, 2490 unsigned long pending_updates) 2491{ 2492 int i, scale; 2493 2494 this_rq->nr_load_updates++; 2495 2496 /* Update our load: */ 2497 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */ 2498 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) { 2499 unsigned long old_load, new_load; 2500 2501 /* scale is effectively 1 << i now, and >> i divides by scale */ 2502 2503 old_load = this_rq->cpu_load[i]; 2504 old_load = decay_load_missed(old_load, pending_updates - 1, i); 2505 new_load = this_load; 2506 /* 2507 * Round up the averaging division if load is increasing. This 2508 * prevents us from getting stuck on 9 if the load is 10, for 2509 * example. 2510 */ 2511 if (new_load > old_load) 2512 new_load += scale - 1; 2513 2514 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; 2515 } 2516 2517 sched_avg_update(this_rq); 2518} 2519 2520#ifdef CONFIG_NO_HZ 2521/* 2522 * There is no sane way to deal with nohz on smp when using jiffies because the 2523 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading 2524 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}. 2525 * 2526 * Therefore we cannot use the delta approach from the regular tick since that 2527 * would seriously skew the load calculation. However we'll make do for those 2528 * updates happening while idle (nohz_idle_balance) or coming out of idle 2529 * (tick_nohz_idle_exit). 2530 * 2531 * This means we might still be one tick off for nohz periods. 2532 */ 2533 2534/* 2535 * Called from nohz_idle_balance() to update the load ratings before doing the 2536 * idle balance. 2537 */ 2538void update_idle_cpu_load(struct rq *this_rq) 2539{ 2540 unsigned long curr_jiffies = ACCESS_ONCE(jiffies); 2541 unsigned long load = this_rq->load.weight; 2542 unsigned long pending_updates; 2543 2544 /* 2545 * bail if there's load or we're actually up-to-date. 2546 */ 2547 if (load || curr_jiffies == this_rq->last_load_update_tick) 2548 return; 2549 2550 pending_updates = curr_jiffies - this_rq->last_load_update_tick; 2551 this_rq->last_load_update_tick = curr_jiffies; 2552 2553 __update_cpu_load(this_rq, load, pending_updates); 2554} 2555 2556/* 2557 * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed. 
2558 */ 2559void update_cpu_load_nohz(void) 2560{ 2561 struct rq *this_rq = this_rq(); 2562 unsigned long curr_jiffies = ACCESS_ONCE(jiffies); 2563 unsigned long pending_updates; 2564 2565 if (curr_jiffies == this_rq->last_load_update_tick) 2566 return; 2567 2568 raw_spin_lock(&this_rq->lock); 2569 pending_updates = curr_jiffies - this_rq->last_load_update_tick; 2570 if (pending_updates) { 2571 this_rq->last_load_update_tick = curr_jiffies; 2572 /* 2573 * We were idle, this means load 0, the current load might be 2574 * !0 due to remote wakeups and the sort. 2575 */ 2576 __update_cpu_load(this_rq, 0, pending_updates); 2577 } 2578 raw_spin_unlock(&this_rq->lock); 2579} 2580#endif /* CONFIG_NO_HZ */ 2581 2582/* 2583 * Called from scheduler_tick() 2584 */ 2585static void update_cpu_load_active(struct rq *this_rq) 2586{ 2587 /* 2588 * See the mess around update_idle_cpu_load() / update_cpu_load_nohz(). 2589 */ 2590 this_rq->last_load_update_tick = jiffies; 2591 __update_cpu_load(this_rq, this_rq->load.weight, 1); 2592 2593 calc_load_account_active(this_rq); 2594} 2595 2596#ifdef CONFIG_SMP 2597 2598/* 2599 * sched_exec - execve() is a valuable balancing opportunity, because at 2600 * this point the task has the smallest effective memory and cache footprint. 2601 */ 2602void sched_exec(void) 2603{ 2604 struct task_struct *p = current; 2605 unsigned long flags; 2606 int dest_cpu; 2607 2608 raw_spin_lock_irqsave(&p->pi_lock, flags); 2609 dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0); 2610 if (dest_cpu == smp_processor_id()) 2611 goto unlock; 2612 2613 if (likely(cpu_active(dest_cpu))) { 2614 struct migration_arg arg = { p, dest_cpu }; 2615 2616 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2617 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 2618 return; 2619 } 2620unlock: 2621 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2622} 2623 2624#endif 2625 2626DEFINE_PER_CPU(struct kernel_stat, kstat); 2627DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 2628 2629EXPORT_PER_CPU_SYMBOL(kstat); 2630EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 2631 2632/* 2633 * Return any ns on the sched_clock that have not yet been accounted in 2634 * @p in case that task is currently running. 2635 * 2636 * Called with task_rq_lock() held on @rq. 2637 */ 2638static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) 2639{ 2640 u64 ns = 0; 2641 2642 if (task_current(rq, p)) { 2643 update_rq_clock(rq); 2644 ns = rq->clock_task - p->se.exec_start; 2645 if ((s64)ns < 0) 2646 ns = 0; 2647 } 2648 2649 return ns; 2650} 2651 2652unsigned long long task_delta_exec(struct task_struct *p) 2653{ 2654 unsigned long flags; 2655 struct rq *rq; 2656 u64 ns = 0; 2657 2658 rq = task_rq_lock(p, &flags); 2659 ns = do_task_delta_exec(p, rq); 2660 task_rq_unlock(rq, p, &flags); 2661 2662 return ns; 2663} 2664 2665/* 2666 * Return accounted runtime for the task. 2667 * In case the task is currently running, return the runtime plus current's 2668 * pending runtime that have not been accounted yet. 
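 * (Used, for example, by the posix cpu-timer code to implement the
 * per-task CPUCLOCK_SCHED clock.)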
2669 */ 2670unsigned long long task_sched_runtime(struct task_struct *p) 2671{ 2672 unsigned long flags; 2673 struct rq *rq; 2674 u64 ns = 0; 2675 2676 rq = task_rq_lock(p, &flags); 2677 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); 2678 task_rq_unlock(rq, p, &flags); 2679 2680 return ns; 2681} 2682 2683#ifdef CONFIG_CGROUP_CPUACCT 2684struct cgroup_subsys cpuacct_subsys; 2685struct cpuacct root_cpuacct; 2686#endif 2687 2688static inline void task_group_account_field(struct task_struct *p, int index, 2689 u64 tmp) 2690{ 2691#ifdef CONFIG_CGROUP_CPUACCT 2692 struct kernel_cpustat *kcpustat; 2693 struct cpuacct *ca; 2694#endif 2695 /* 2696 * Since all updates are sure to touch the root cgroup, we 2697 * get ourselves ahead and touch it first. If the root cgroup 2698 * is the only cgroup, then nothing else should be necessary. 2699 * 2700 */ 2701 __get_cpu_var(kernel_cpustat).cpustat[index] += tmp; 2702 2703#ifdef CONFIG_CGROUP_CPUACCT 2704 if (unlikely(!cpuacct_subsys.active)) 2705 return; 2706 2707 rcu_read_lock(); 2708 ca = task_ca(p); 2709 while (ca && (ca != &root_cpuacct)) { 2710 kcpustat = this_cpu_ptr(ca->cpustat); 2711 kcpustat->cpustat[index] += tmp; 2712 ca = parent_ca(ca); 2713 } 2714 rcu_read_unlock(); 2715#endif 2716} 2717 2718 2719/* 2720 * Account user cpu time to a process. 2721 * @p: the process that the cpu time gets accounted to 2722 * @cputime: the cpu time spent in user space since the last update 2723 * @cputime_scaled: cputime scaled by cpu frequency 2724 */ 2725void account_user_time(struct task_struct *p, cputime_t cputime, 2726 cputime_t cputime_scaled) 2727{ 2728 int index; 2729 2730 /* Add user time to process. */ 2731 p->utime += cputime; 2732 p->utimescaled += cputime_scaled; 2733 account_group_user_time(p, cputime); 2734 2735 index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; 2736 2737 /* Add user time to cpustat. */ 2738 task_group_account_field(p, index, (__force u64) cputime); 2739 2740 /* Account for user time used */ 2741 acct_update_integrals(p); 2742} 2743 2744/* 2745 * Account guest cpu time to a process. 2746 * @p: the process that the cpu time gets accounted to 2747 * @cputime: the cpu time spent in virtual machine since the last update 2748 * @cputime_scaled: cputime scaled by cpu frequency 2749 */ 2750static void account_guest_time(struct task_struct *p, cputime_t cputime, 2751 cputime_t cputime_scaled) 2752{ 2753 u64 *cpustat = kcpustat_this_cpu->cpustat; 2754 2755 /* Add guest time to process. */ 2756 p->utime += cputime; 2757 p->utimescaled += cputime_scaled; 2758 account_group_user_time(p, cputime); 2759 p->gtime += cputime; 2760 2761 /* Add guest time to cpustat. */ 2762 if (TASK_NICE(p) > 0) { 2763 cpustat[CPUTIME_NICE] += (__force u64) cputime; 2764 cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime; 2765 } else { 2766 cpustat[CPUTIME_USER] += (__force u64) cputime; 2767 cpustat[CPUTIME_GUEST] += (__force u64) cputime; 2768 } 2769} 2770 2771/* 2772 * Account system cpu time to a process and desired cpustat field 2773 * @p: the process that the cpu time gets accounted to 2774 * @cputime: the cpu time spent in kernel space since the last update 2775 * @cputime_scaled: cputime scaled by cpu frequency 2776 * @target_cputime64: pointer to cpustat field that has to be updated 2777 */ 2778static inline 2779void __account_system_time(struct task_struct *p, cputime_t cputime, 2780 cputime_t cputime_scaled, int index) 2781{ 2782 /* Add system time to process. 
*/ 2783 p->stime += cputime; 2784 p->stimescaled += cputime_scaled; 2785 account_group_system_time(p, cputime); 2786 2787 /* Add system time to cpustat. */ 2788 task_group_account_field(p, index, (__force u64) cputime); 2789 2790 /* Account for system time used */ 2791 acct_update_integrals(p); 2792} 2793 2794/* 2795 * Account system cpu time to a process. 2796 * @p: the process that the cpu time gets accounted to 2797 * @hardirq_offset: the offset to subtract from hardirq_count() 2798 * @cputime: the cpu time spent in kernel space since the last update 2799 * @cputime_scaled: cputime scaled by cpu frequency 2800 */ 2801void account_system_time(struct task_struct *p, int hardirq_offset, 2802 cputime_t cputime, cputime_t cputime_scaled) 2803{ 2804 int index; 2805 2806 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { 2807 account_guest_time(p, cputime, cputime_scaled); 2808 return; 2809 } 2810 2811 if (hardirq_count() - hardirq_offset) 2812 index = CPUTIME_IRQ; 2813 else if (in_serving_softirq()) 2814 index = CPUTIME_SOFTIRQ; 2815 else 2816 index = CPUTIME_SYSTEM; 2817 2818 __account_system_time(p, cputime, cputime_scaled, index); 2819} 2820 2821/* 2822 * Account for involuntary wait time. 2823 * @cputime: the cpu time spent in involuntary wait 2824 */ 2825void account_steal_time(cputime_t cputime) 2826{ 2827 u64 *cpustat = kcpustat_this_cpu->cpustat; 2828 2829 cpustat[CPUTIME_STEAL] += (__force u64) cputime; 2830} 2831 2832/* 2833 * Account for idle time. 2834 * @cputime: the cpu time spent in idle wait 2835 */ 2836void account_idle_time(cputime_t cputime) 2837{ 2838 u64 *cpustat = kcpustat_this_cpu->cpustat; 2839 struct rq *rq = this_rq(); 2840 2841 if (atomic_read(&rq->nr_iowait) > 0) 2842 cpustat[CPUTIME_IOWAIT] += (__force u64) cputime; 2843 else 2844 cpustat[CPUTIME_IDLE] += (__force u64) cputime; 2845} 2846 2847static __always_inline bool steal_account_process_tick(void) 2848{ 2849#ifdef CONFIG_PARAVIRT 2850 if (static_key_false(¶virt_steal_enabled)) { 2851 u64 steal, st = 0; 2852 2853 steal = paravirt_steal_clock(smp_processor_id()); 2854 steal -= this_rq()->prev_steal_time; 2855 2856 st = steal_ticks(steal); 2857 this_rq()->prev_steal_time += st * TICK_NSEC; 2858 2859 account_steal_time(st); 2860 return st; 2861 } 2862#endif 2863 return false; 2864} 2865 2866#ifndef CONFIG_VIRT_CPU_ACCOUNTING 2867 2868#ifdef CONFIG_IRQ_TIME_ACCOUNTING 2869/* 2870 * Account a tick to a process and cpustat 2871 * @p: the process that the cpu time gets accounted to 2872 * @user_tick: is the tick from userspace 2873 * @rq: the pointer to rq 2874 * 2875 * Tick demultiplexing follows the order 2876 * - pending hardirq update 2877 * - pending softirq update 2878 * - user_time 2879 * - idle_time 2880 * - system time 2881 * - check for guest_time 2882 * - else account as system_time 2883 * 2884 * Check for hardirq is done both for system and user time as there is 2885 * no timer going off while we are on hardirq and hence we may never get an 2886 * opportunity to update it solely in system time. 2887 * p->stime and friends are only updated on system time and not on irq 2888 * softirq as those do not count in task exec_runtime any more. 
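 *
 * Concretely: a tick landing while a hardirq is being serviced is
 * charged to CPUTIME_IRQ no matter whether user or kernel code was
 * interrupted, and a tick consumed by ksoftirqd is charged as system
 * time to CPUTIME_SOFTIRQ so that ksoftirqd's stime keeps advancing.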
2889 */ 2890static void irqtime_account_process_tick(struct task_struct *p, int user_tick, 2891 struct rq *rq) 2892{ 2893 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); 2894 u64 *cpustat = kcpustat_this_cpu->cpustat; 2895 2896 if (steal_account_process_tick()) 2897 return; 2898 2899 if (irqtime_account_hi_update()) { 2900 cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy; 2901 } else if (irqtime_account_si_update()) { 2902 cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy; 2903 } else if (this_cpu_ksoftirqd() == p) { 2904 /* 2905 * ksoftirqd time do not get accounted in cpu_softirq_time. 2906 * So, we have to handle it separately here. 2907 * Also, p->stime needs to be updated for ksoftirqd. 2908 */ 2909 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, 2910 CPUTIME_SOFTIRQ); 2911 } else if (user_tick) { 2912 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); 2913 } else if (p == rq->idle) { 2914 account_idle_time(cputime_one_jiffy); 2915 } else if (p->flags & PF_VCPU) { /* System time or guest time */ 2916 account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled); 2917 } else { 2918 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, 2919 CPUTIME_SYSTEM); 2920 } 2921} 2922 2923static void irqtime_account_idle_ticks(int ticks) 2924{ 2925 int i; 2926 struct rq *rq = this_rq(); 2927 2928 for (i = 0; i < ticks; i++) 2929 irqtime_account_process_tick(current, 0, rq); 2930} 2931#else /* CONFIG_IRQ_TIME_ACCOUNTING */ 2932static void irqtime_account_idle_ticks(int ticks) {} 2933static void irqtime_account_process_tick(struct task_struct *p, int user_tick, 2934 struct rq *rq) {} 2935#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 2936 2937/* 2938 * Account a single tick of cpu time. 2939 * @p: the process that the cpu time gets accounted to 2940 * @user_tick: indicates if the tick is a user or a system tick 2941 */ 2942void account_process_tick(struct task_struct *p, int user_tick) 2943{ 2944 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); 2945 struct rq *rq = this_rq(); 2946 2947 if (sched_clock_irqtime) { 2948 irqtime_account_process_tick(p, user_tick, rq); 2949 return; 2950 } 2951 2952 if (steal_account_process_tick()) 2953 return; 2954 2955 if (user_tick) 2956 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); 2957 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) 2958 account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy, 2959 one_jiffy_scaled); 2960 else 2961 account_idle_time(cputime_one_jiffy); 2962} 2963 2964/* 2965 * Account multiple ticks of steal time. 2966 * @p: the process from which the cpu time has been stolen 2967 * @ticks: number of stolen ticks 2968 */ 2969void account_steal_ticks(unsigned long ticks) 2970{ 2971 account_steal_time(jiffies_to_cputime(ticks)); 2972} 2973 2974/* 2975 * Account multiple ticks of idle time. 
2976 * @ticks: number of stolen ticks 2977 */ 2978void account_idle_ticks(unsigned long ticks) 2979{ 2980 2981 if (sched_clock_irqtime) { 2982 irqtime_account_idle_ticks(ticks); 2983 return; 2984 } 2985 2986 account_idle_time(jiffies_to_cputime(ticks)); 2987} 2988 2989#endif 2990 2991/* 2992 * Use precise platform statistics if available: 2993 */ 2994#ifdef CONFIG_VIRT_CPU_ACCOUNTING 2995void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) 2996{ 2997 *ut = p->utime; 2998 *st = p->stime; 2999} 3000 3001void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) 3002{ 3003 struct task_cputime cputime; 3004 3005 thread_group_cputime(p, &cputime); 3006 3007 *ut = cputime.utime; 3008 *st = cputime.stime; 3009} 3010#else 3011 3012#ifndef nsecs_to_cputime 3013# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) 3014#endif 3015 3016void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) 3017{ 3018 cputime_t rtime, utime = p->utime, total = utime + p->stime; 3019 3020 /* 3021 * Use CFS's precise accounting: 3022 */ 3023 rtime = nsecs_to_cputime(p->se.sum_exec_runtime); 3024 3025 if (total) { 3026 u64 temp = (__force u64) rtime; 3027 3028 temp *= (__force u64) utime; 3029 do_div(temp, (__force u32) total); 3030 utime = (__force cputime_t) temp; 3031 } else 3032 utime = rtime; 3033 3034 /* 3035 * Compare with previous values, to keep monotonicity: 3036 */ 3037 p->prev_utime = max(p->prev_utime, utime); 3038 p->prev_stime = max(p->prev_stime, rtime - p->prev_utime); 3039 3040 *ut = p->prev_utime; 3041 *st = p->prev_stime; 3042} 3043 3044/* 3045 * Must be called with siglock held. 3046 */ 3047void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) 3048{ 3049 struct signal_struct *sig = p->signal; 3050 struct task_cputime cputime; 3051 cputime_t rtime, utime, total; 3052 3053 thread_group_cputime(p, &cputime); 3054 3055 total = cputime.utime + cputime.stime; 3056 rtime = nsecs_to_cputime(cputime.sum_exec_runtime); 3057 3058 if (total) { 3059 u64 temp = (__force u64) rtime; 3060 3061 temp *= (__force u64) cputime.utime; 3062 do_div(temp, (__force u32) total); 3063 utime = (__force cputime_t) temp; 3064 } else 3065 utime = rtime; 3066 3067 sig->prev_utime = max(sig->prev_utime, utime); 3068 sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime); 3069 3070 *ut = sig->prev_utime; 3071 *st = sig->prev_stime; 3072} 3073#endif 3074 3075/* 3076 * This function gets called by the timer code, with HZ frequency. 3077 * We call it with interrupts disabled. 3078 */ 3079void scheduler_tick(void) 3080{ 3081 int cpu = smp_processor_id(); 3082 struct rq *rq = cpu_rq(cpu); 3083 struct task_struct *curr = rq->curr; 3084 3085 sched_clock_tick(); 3086 3087 raw_spin_lock(&rq->lock); 3088 update_rq_clock(rq); 3089 update_cpu_load_active(rq); 3090 curr->sched_class->task_tick(rq, curr, 0); 3091 raw_spin_unlock(&rq->lock); 3092 3093 perf_event_task_tick(); 3094 3095#ifdef CONFIG_SMP 3096 rq->idle_balance = idle_cpu(cpu); 3097 trigger_load_balance(rq, cpu); 3098#endif 3099} 3100 3101notrace unsigned long get_parent_ip(unsigned long addr) 3102{ 3103 if (in_lock_functions(addr)) { 3104 addr = CALLER_ADDR2; 3105 if (in_lock_functions(addr)) 3106 addr = CALLER_ADDR3; 3107 } 3108 return addr; 3109} 3110 3111#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ 3112 defined(CONFIG_PREEMPT_TRACER)) 3113 3114void __kprobes add_preempt_count(int val) 3115{ 3116#ifdef CONFIG_DEBUG_PREEMPT 3117 /* 3118 * Underflow? 
3119 */ 3120 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 3121 return; 3122#endif 3123 preempt_count() += val; 3124#ifdef CONFIG_DEBUG_PREEMPT 3125 /* 3126 * Spinlock count overflowing soon? 3127 */ 3128 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 3129 PREEMPT_MASK - 10); 3130#endif 3131 if (preempt_count() == val) 3132 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); 3133} 3134EXPORT_SYMBOL(add_preempt_count); 3135 3136void __kprobes sub_preempt_count(int val) 3137{ 3138#ifdef CONFIG_DEBUG_PREEMPT 3139 /* 3140 * Underflow? 3141 */ 3142 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 3143 return; 3144 /* 3145 * Is the spinlock portion underflowing? 3146 */ 3147 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 3148 !(preempt_count() & PREEMPT_MASK))) 3149 return; 3150#endif 3151 3152 if (preempt_count() == val) 3153 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); 3154 preempt_count() -= val; 3155} 3156EXPORT_SYMBOL(sub_preempt_count); 3157 3158#endif 3159 3160/* 3161 * Print scheduling while atomic bug: 3162 */ 3163static noinline void __schedule_bug(struct task_struct *prev) 3164{ 3165 if (oops_in_progress) 3166 return; 3167 3168 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 3169 prev->comm, prev->pid, preempt_count()); 3170 3171 debug_show_held_locks(prev); 3172 print_modules(); 3173 if (irqs_disabled()) 3174 print_irqtrace_events(prev); 3175 dump_stack(); 3176 add_taint(TAINT_WARN); 3177} 3178 3179/* 3180 * Various schedule()-time debugging checks and statistics: 3181 */ 3182static inline void schedule_debug(struct task_struct *prev) 3183{ 3184 /* 3185 * Test if we are atomic. Since do_exit() needs to call into 3186 * schedule() atomically, we ignore that path for now. 3187 * Otherwise, whine if we are scheduling when we should not be. 3188 */ 3189 if (unlikely(in_atomic_preempt_off() && !prev->exit_state)) 3190 __schedule_bug(prev); 3191 rcu_sleep_check(); 3192 3193 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 3194 3195 schedstat_inc(this_rq(), sched_count); 3196} 3197 3198static void put_prev_task(struct rq *rq, struct task_struct *prev) 3199{ 3200 if (prev->on_rq || rq->skip_clock_update < 0) 3201 update_rq_clock(rq); 3202 prev->sched_class->put_prev_task(rq, prev); 3203} 3204 3205/* 3206 * Pick up the highest-prio task: 3207 */ 3208static inline struct task_struct * 3209pick_next_task(struct rq *rq) 3210{ 3211 const struct sched_class *class; 3212 struct task_struct *p; 3213 3214 /* 3215 * Optimization: we know that if all tasks are in 3216 * the fair class we can call that function directly: 3217 */ 3218 if (likely(rq->nr_running == rq->cfs.h_nr_running)) { 3219 p = fair_sched_class.pick_next_task(rq); 3220 if (likely(p)) 3221 return p; 3222 } 3223 3224 for_each_class(class) { 3225 p = class->pick_next_task(rq); 3226 if (p) 3227 return p; 3228 } 3229 3230 BUG(); /* the idle class will always have a runnable task */ 3231} 3232 3233/* 3234 * __schedule() is the main scheduler function. 
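 *
 * We get here in a few ways: an explicit schedule() call when a task
 * blocks or yields, kernel preemption from preempt_enable() via
 * preempt_schedule(), and the interrupt-return path via
 * preempt_schedule_irq() when the preempted kernel context is
 * preemptible and needs rescheduling (see the entry points below).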
3235 */ 3236static void __sched __schedule(void) 3237{ 3238 struct task_struct *prev, *next; 3239 unsigned long *switch_count; 3240 struct rq *rq; 3241 int cpu; 3242 3243need_resched: 3244 preempt_disable(); 3245 cpu = smp_processor_id(); 3246 rq = cpu_rq(cpu); 3247 rcu_note_context_switch(cpu); 3248 prev = rq->curr; 3249 3250 schedule_debug(prev); 3251 3252 if (sched_feat(HRTICK)) 3253 hrtick_clear(rq); 3254 3255 raw_spin_lock_irq(&rq->lock); 3256 3257 switch_count = &prev->nivcsw; 3258 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { 3259 if (unlikely(signal_pending_state(prev->state, prev))) { 3260 prev->state = TASK_RUNNING; 3261 } else { 3262 deactivate_task(rq, prev, DEQUEUE_SLEEP); 3263 prev->on_rq = 0; 3264 3265 /* 3266 * If a worker went to sleep, notify and ask workqueue 3267 * whether it wants to wake up a task to maintain 3268 * concurrency. 3269 */ 3270 if (prev->flags & PF_WQ_WORKER) { 3271 struct task_struct *to_wakeup; 3272 3273 to_wakeup = wq_worker_sleeping(prev, cpu); 3274 if (to_wakeup) 3275 try_to_wake_up_local(to_wakeup); 3276 } 3277 } 3278 switch_count = &prev->nvcsw; 3279 } 3280 3281 pre_schedule(rq, prev); 3282 3283 if (unlikely(!rq->nr_running)) 3284 idle_balance(cpu, rq); 3285 3286 put_prev_task(rq, prev); 3287 next = pick_next_task(rq); 3288 clear_tsk_need_resched(prev); 3289 rq->skip_clock_update = 0; 3290 3291 if (likely(prev != next)) { 3292 rq->nr_switches++; 3293 rq->curr = next; 3294 ++*switch_count; 3295 3296 context_switch(rq, prev, next); /* unlocks the rq */ 3297 /* 3298 * The context switch have flipped the stack from under us 3299 * and restored the local variables which were saved when 3300 * this task called schedule() in the past. prev == current 3301 * is still correct, but it can be moved to another cpu/rq. 3302 */ 3303 cpu = smp_processor_id(); 3304 rq = cpu_rq(cpu); 3305 } else 3306 raw_spin_unlock_irq(&rq->lock); 3307 3308 post_schedule(rq); 3309 3310 sched_preempt_enable_no_resched(); 3311 if (need_resched()) 3312 goto need_resched; 3313} 3314 3315static inline void sched_submit_work(struct task_struct *tsk) 3316{ 3317 if (!tsk->state || tsk_is_pi_blocked(tsk)) 3318 return; 3319 /* 3320 * If we are going to sleep and we have plugged IO queued, 3321 * make sure to submit it to avoid deadlocks. 3322 */ 3323 if (blk_needs_flush_plug(tsk)) 3324 blk_schedule_flush_plug(tsk); 3325} 3326 3327asmlinkage void __sched schedule(void) 3328{ 3329 struct task_struct *tsk = current; 3330 3331 sched_submit_work(tsk); 3332 __schedule(); 3333} 3334EXPORT_SYMBOL(schedule); 3335 3336/** 3337 * schedule_preempt_disabled - called with preemption disabled 3338 * 3339 * Returns with preemption disabled. Note: preempt_count must be 1 3340 */ 3341void __sched schedule_preempt_disabled(void) 3342{ 3343 sched_preempt_enable_no_resched(); 3344 schedule(); 3345 preempt_disable(); 3346} 3347 3348#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 3349 3350static inline bool owner_running(struct mutex *lock, struct task_struct *owner) 3351{ 3352 if (lock->owner != owner) 3353 return false; 3354 3355 /* 3356 * Ensure we emit the owner->on_cpu, dereference _after_ checking 3357 * lock->owner still matches owner, if that fails, owner might 3358 * point to free()d memory, if it still matches, the rcu_read_lock() 3359 * ensures the memory stays valid. 3360 */ 3361 barrier(); 3362 3363 return owner->on_cpu; 3364} 3365 3366/* 3367 * Look out! "owner" is an entirely speculative pointer 3368 * access and not reliable. 
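 *
 * It was loaded without the mutex's wait_lock held, so by the time we
 * look at it the owner may already have released the lock or exited;
 * owner_running() re-checks lock->owner and relies on the
 * rcu_read_lock() below to keep the task_struct memory valid while
 * owner->on_cpu is read.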
3369 */ 3370int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner) 3371{ 3372 if (!sched_feat(OWNER_SPIN)) 3373 return 0; 3374 3375 rcu_read_lock(); 3376 while (owner_running(lock, owner)) { 3377 if (need_resched()) 3378 break; 3379 3380 arch_mutex_cpu_relax(); 3381 } 3382 rcu_read_unlock(); 3383 3384 /* 3385 * We break out the loop above on need_resched() and when the 3386 * owner changed, which is a sign for heavy contention. Return 3387 * success only when lock->owner is NULL. 3388 */ 3389 return lock->owner == NULL; 3390} 3391#endif 3392 3393#ifdef CONFIG_PREEMPT 3394/* 3395 * this is the entry point to schedule() from in-kernel preemption 3396 * off of preempt_enable. Kernel preemptions off return from interrupt 3397 * occur there and call schedule directly. 3398 */ 3399asmlinkage void __sched notrace preempt_schedule(void) 3400{ 3401 struct thread_info *ti = current_thread_info(); 3402 3403 /* 3404 * If there is a non-zero preempt_count or interrupts are disabled, 3405 * we do not want to preempt the current task. Just return.. 3406 */ 3407 if (likely(ti->preempt_count || irqs_disabled())) 3408 return; 3409 3410 do { 3411 add_preempt_count_notrace(PREEMPT_ACTIVE); 3412 __schedule(); 3413 sub_preempt_count_notrace(PREEMPT_ACTIVE); 3414 3415 /* 3416 * Check again in case we missed a preemption opportunity 3417 * between schedule and now. 3418 */ 3419 barrier(); 3420 } while (need_resched()); 3421} 3422EXPORT_SYMBOL(preempt_schedule); 3423 3424/* 3425 * this is the entry point to schedule() from kernel preemption 3426 * off of irq context. 3427 * Note, that this is called and return with irqs disabled. This will 3428 * protect us against recursive calling from irq. 3429 */ 3430asmlinkage void __sched preempt_schedule_irq(void) 3431{ 3432 struct thread_info *ti = current_thread_info(); 3433 3434 /* Catch callers which need to be fixed */ 3435 BUG_ON(ti->preempt_count || !irqs_disabled()); 3436 3437 do { 3438 add_preempt_count(PREEMPT_ACTIVE); 3439 local_irq_enable(); 3440 __schedule(); 3441 local_irq_disable(); 3442 sub_preempt_count(PREEMPT_ACTIVE); 3443 3444 /* 3445 * Check again in case we missed a preemption opportunity 3446 * between schedule and now. 3447 */ 3448 barrier(); 3449 } while (need_resched()); 3450} 3451 3452#endif /* CONFIG_PREEMPT */ 3453 3454int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, 3455 void *key) 3456{ 3457 return try_to_wake_up(curr->private, mode, wake_flags); 3458} 3459EXPORT_SYMBOL(default_wake_function); 3460 3461/* 3462 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just 3463 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve 3464 * number) then we wake all the non-exclusive tasks and one exclusive task. 3465 * 3466 * There are circumstances in which we can try to wake a task which has already 3467 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns 3468 * zero in this (rare) case, and we handle it by continuing to scan the queue. 3469 */ 3470static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, 3471 int nr_exclusive, int wake_flags, void *key) 3472{ 3473 wait_queue_t *curr, *next; 3474 3475 list_for_each_entry_safe(curr, next, &q->task_list, task_list) { 3476 unsigned flags = curr->flags; 3477 3478 if (curr->func(curr, mode, wake_flags, key) && 3479 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) 3480 break; 3481 } 3482} 3483 3484/** 3485 * __wake_up - wake up threads blocked on a waitqueue. 
3486 * @q: the waitqueue 3487 * @mode: which threads 3488 * @nr_exclusive: how many wake-one or wake-many threads to wake up 3489 * @key: is directly passed to the wakeup function 3490 * 3491 * It may be assumed that this function implies a write memory barrier before 3492 * changing the task state if and only if any tasks are woken up. 3493 */ 3494void __wake_up(wait_queue_head_t *q, unsigned int mode, 3495 int nr_exclusive, void *key) 3496{ 3497 unsigned long flags; 3498 3499 spin_lock_irqsave(&q->lock, flags); 3500 __wake_up_common(q, mode, nr_exclusive, 0, key); 3501 spin_unlock_irqrestore(&q->lock, flags); 3502} 3503EXPORT_SYMBOL(__wake_up); 3504 3505/* 3506 * Same as __wake_up but called with the spinlock in wait_queue_head_t held. 3507 */ 3508void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr) 3509{ 3510 __wake_up_common(q, mode, nr, 0, NULL); 3511} 3512EXPORT_SYMBOL_GPL(__wake_up_locked); 3513 3514void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key) 3515{ 3516 __wake_up_common(q, mode, 1, 0, key); 3517} 3518EXPORT_SYMBOL_GPL(__wake_up_locked_key); 3519 3520/** 3521 * __wake_up_sync_key - wake up threads blocked on a waitqueue. 3522 * @q: the waitqueue 3523 * @mode: which threads 3524 * @nr_exclusive: how many wake-one or wake-many threads to wake up 3525 * @key: opaque value to be passed to wakeup targets 3526 * 3527 * The sync wakeup differs that the waker knows that it will schedule 3528 * away soon, so while the target thread will be woken up, it will not 3529 * be migrated to another CPU - ie. the two threads are 'synchronized' 3530 * with each other. This can prevent needless bouncing between CPUs. 3531 * 3532 * On UP it can prevent extra preemption. 3533 * 3534 * It may be assumed that this function implies a write memory barrier before 3535 * changing the task state if and only if any tasks are woken up. 3536 */ 3537void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, 3538 int nr_exclusive, void *key) 3539{ 3540 unsigned long flags; 3541 int wake_flags = WF_SYNC; 3542 3543 if (unlikely(!q)) 3544 return; 3545 3546 if (unlikely(!nr_exclusive)) 3547 wake_flags = 0; 3548 3549 spin_lock_irqsave(&q->lock, flags); 3550 __wake_up_common(q, mode, nr_exclusive, wake_flags, key); 3551 spin_unlock_irqrestore(&q->lock, flags); 3552} 3553EXPORT_SYMBOL_GPL(__wake_up_sync_key); 3554 3555/* 3556 * __wake_up_sync - see __wake_up_sync_key() 3557 */ 3558void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) 3559{ 3560 __wake_up_sync_key(q, mode, nr_exclusive, NULL); 3561} 3562EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ 3563 3564/** 3565 * complete: - signals a single thread waiting on this completion 3566 * @x: holds the state of this particular completion 3567 * 3568 * This will wake up a single thread waiting on this completion. Threads will be 3569 * awakened in the same order in which they were queued. 3570 * 3571 * See also complete_all(), wait_for_completion() and related routines. 3572 * 3573 * It may be assumed that this function implies a write memory barrier before 3574 * changing the task state if and only if any tasks are woken up. 
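 *
 * A typical usage pattern (illustrative sketch): the waiting side does
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	... hand &done to whoever finishes the work ...
 *	wait_for_completion(&done);
 *
 * while the completing side calls complete(&done) once per waiter it
 * wants to release, or complete_all() to release every waiter.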
3575 */ 3576void complete(struct completion *x) 3577{ 3578 unsigned long flags; 3579 3580 spin_lock_irqsave(&x->wait.lock, flags); 3581 x->done++; 3582 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL); 3583 spin_unlock_irqrestore(&x->wait.lock, flags); 3584} 3585EXPORT_SYMBOL(complete); 3586 3587/** 3588 * complete_all: - signals all threads waiting on this completion 3589 * @x: holds the state of this particular completion 3590 * 3591 * This will wake up all threads waiting on this particular completion event. 3592 * 3593 * It may be assumed that this function implies a write memory barrier before 3594 * changing the task state if and only if any tasks are woken up. 3595 */ 3596void complete_all(struct completion *x) 3597{ 3598 unsigned long flags; 3599 3600 spin_lock_irqsave(&x->wait.lock, flags); 3601 x->done += UINT_MAX/2; 3602 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL); 3603 spin_unlock_irqrestore(&x->wait.lock, flags); 3604} 3605EXPORT_SYMBOL(complete_all); 3606 3607static inline long __sched 3608do_wait_for_common(struct completion *x, long timeout, int state) 3609{ 3610 if (!x->done) { 3611 DECLARE_WAITQUEUE(wait, current); 3612 3613 __add_wait_queue_tail_exclusive(&x->wait, &wait); 3614 do { 3615 if (signal_pending_state(state, current)) { 3616 timeout = -ERESTARTSYS; 3617 break; 3618 } 3619 __set_current_state(state); 3620 spin_unlock_irq(&x->wait.lock); 3621 timeout = schedule_timeout(timeout); 3622 spin_lock_irq(&x->wait.lock); 3623 } while (!x->done && timeout); 3624 __remove_wait_queue(&x->wait, &wait); 3625 if (!x->done) 3626 return timeout; 3627 } 3628 x->done--; 3629 return timeout ?: 1; 3630} 3631 3632static long __sched 3633wait_for_common(struct completion *x, long timeout, int state) 3634{ 3635 might_sleep(); 3636 3637 spin_lock_irq(&x->wait.lock); 3638 timeout = do_wait_for_common(x, timeout, state); 3639 spin_unlock_irq(&x->wait.lock); 3640 return timeout; 3641} 3642 3643/** 3644 * wait_for_completion: - waits for completion of a task 3645 * @x: holds the state of this particular completion 3646 * 3647 * This waits to be signaled for completion of a specific task. It is NOT 3648 * interruptible and there is no timeout. 3649 * 3650 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout 3651 * and interrupt capability. Also see complete(). 3652 */ 3653void __sched wait_for_completion(struct completion *x) 3654{ 3655 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); 3656} 3657EXPORT_SYMBOL(wait_for_completion); 3658 3659/** 3660 * wait_for_completion_timeout: - waits for completion of a task (w/timeout) 3661 * @x: holds the state of this particular completion 3662 * @timeout: timeout value in jiffies 3663 * 3664 * This waits for either a completion of a specific task to be signaled or for a 3665 * specified timeout to expire. The timeout is in jiffies. It is not 3666 * interruptible. 3667 * 3668 * The return value is 0 if timed out, and positive (at least 1, or number of 3669 * jiffies left till timeout) if completed. 3670 */ 3671unsigned long __sched 3672wait_for_completion_timeout(struct completion *x, unsigned long timeout) 3673{ 3674 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); 3675} 3676EXPORT_SYMBOL(wait_for_completion_timeout); 3677 3678/** 3679 * wait_for_completion_interruptible: - waits for completion of a task (w/intr) 3680 * @x: holds the state of this particular completion 3681 * 3682 * This waits for completion of a specific task to be signaled. It is 3683 * interruptible. 
3684 * 3685 * The return value is -ERESTARTSYS if interrupted, 0 if completed. 3686 */ 3687int __sched wait_for_completion_interruptible(struct completion *x) 3688{ 3689 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); 3690 if (t == -ERESTARTSYS) 3691 return t; 3692 return 0; 3693} 3694EXPORT_SYMBOL(wait_for_completion_interruptible); 3695 3696/** 3697 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr)) 3698 * @x: holds the state of this particular completion 3699 * @timeout: timeout value in jiffies 3700 * 3701 * This waits for either a completion of a specific task to be signaled or for a 3702 * specified timeout to expire. It is interruptible. The timeout is in jiffies. 3703 * 3704 * The return value is -ERESTARTSYS if interrupted, 0 if timed out, 3705 * positive (at least 1, or number of jiffies left till timeout) if completed. 3706 */ 3707long __sched 3708wait_for_completion_interruptible_timeout(struct completion *x, 3709 unsigned long timeout) 3710{ 3711 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE); 3712} 3713EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); 3714 3715/** 3716 * wait_for_completion_killable: - waits for completion of a task (killable) 3717 * @x: holds the state of this particular completion 3718 * 3719 * This waits to be signaled for completion of a specific task. It can be 3720 * interrupted by a kill signal. 3721 * 3722 * The return value is -ERESTARTSYS if interrupted, 0 if completed. 3723 */ 3724int __sched wait_for_completion_killable(struct completion *x) 3725{ 3726 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); 3727 if (t == -ERESTARTSYS) 3728 return t; 3729 return 0; 3730} 3731EXPORT_SYMBOL(wait_for_completion_killable); 3732 3733/** 3734 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable)) 3735 * @x: holds the state of this particular completion 3736 * @timeout: timeout value in jiffies 3737 * 3738 * This waits for either a completion of a specific task to be 3739 * signaled or for a specified timeout to expire. It can be 3740 * interrupted by a kill signal. The timeout is in jiffies. 3741 * 3742 * The return value is -ERESTARTSYS if interrupted, 0 if timed out, 3743 * positive (at least 1, or number of jiffies left till timeout) if completed. 3744 */ 3745long __sched 3746wait_for_completion_killable_timeout(struct completion *x, 3747 unsigned long timeout) 3748{ 3749 return wait_for_common(x, timeout, TASK_KILLABLE); 3750} 3751EXPORT_SYMBOL(wait_for_completion_killable_timeout); 3752 3753/** 3754 * try_wait_for_completion - try to decrement a completion without blocking 3755 * @x: completion structure 3756 * 3757 * Returns: 0 if a decrement cannot be done without blocking 3758 * 1 if a decrement succeeded. 3759 * 3760 * If a completion is being used as a counting completion, 3761 * attempt to decrement the counter without blocking. This 3762 * enables us to avoid waiting if the resource the completion 3763 * is protecting is not available. 
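 *
 * This is the completion counterpart of a trylock; e.g. (illustrative)
 *
 *	if (!try_wait_for_completion(&x))
 *		return -EBUSY;
 *
 * lets a caller that must not sleep fall back to some other strategy.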
3764 */ 3765bool try_wait_for_completion(struct completion *x) 3766{ 3767 unsigned long flags; 3768 int ret = 1; 3769 3770 spin_lock_irqsave(&x->wait.lock, flags); 3771 if (!x->done) 3772 ret = 0; 3773 else 3774 x->done--; 3775 spin_unlock_irqrestore(&x->wait.lock, flags); 3776 return ret; 3777} 3778EXPORT_SYMBOL(try_wait_for_completion); 3779 3780/** 3781 * completion_done - Test to see if a completion has any waiters 3782 * @x: completion structure 3783 * 3784 * Returns: 0 if there are waiters (wait_for_completion() in progress) 3785 * 1 if there are no waiters. 3786 * 3787 */ 3788bool completion_done(struct completion *x) 3789{ 3790 unsigned long flags; 3791 int ret = 1; 3792 3793 spin_lock_irqsave(&x->wait.lock, flags); 3794 if (!x->done) 3795 ret = 0; 3796 spin_unlock_irqrestore(&x->wait.lock, flags); 3797 return ret; 3798} 3799EXPORT_SYMBOL(completion_done); 3800 3801static long __sched 3802sleep_on_common(wait_queue_head_t *q, int state, long timeout) 3803{ 3804 unsigned long flags; 3805 wait_queue_t wait; 3806 3807 init_waitqueue_entry(&wait, current); 3808 3809 __set_current_state(state); 3810 3811 spin_lock_irqsave(&q->lock, flags); 3812 __add_wait_queue(q, &wait); 3813 spin_unlock(&q->lock); 3814 timeout = schedule_timeout(timeout); 3815 spin_lock_irq(&q->lock); 3816 __remove_wait_queue(q, &wait); 3817 spin_unlock_irqrestore(&q->lock, flags); 3818 3819 return timeout; 3820} 3821 3822void __sched interruptible_sleep_on(wait_queue_head_t *q) 3823{ 3824 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 3825} 3826EXPORT_SYMBOL(interruptible_sleep_on); 3827 3828long __sched 3829interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) 3830{ 3831 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout); 3832} 3833EXPORT_SYMBOL(interruptible_sleep_on_timeout); 3834 3835void __sched sleep_on(wait_queue_head_t *q) 3836{ 3837 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 3838} 3839EXPORT_SYMBOL(sleep_on); 3840 3841long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) 3842{ 3843 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout); 3844} 3845EXPORT_SYMBOL(sleep_on_timeout); 3846 3847#ifdef CONFIG_RT_MUTEXES 3848 3849/* 3850 * rt_mutex_setprio - set the current priority of a task 3851 * @p: task 3852 * @prio: prio value (kernel-internal form) 3853 * 3854 * This function changes the 'effective' priority of a task. It does 3855 * not touch ->normal_prio like __setscheduler(). 3856 * 3857 * Used by the rt_mutex code to implement priority inheritance logic. 3858 */ 3859void rt_mutex_setprio(struct task_struct *p, int prio) 3860{ 3861 int oldprio, on_rq, running; 3862 struct rq *rq; 3863 const struct sched_class *prev_class; 3864 3865 BUG_ON(prio < 0 || prio > MAX_PRIO); 3866 3867 rq = __task_rq_lock(p); 3868 3869 /* 3870 * Idle task boosting is a nono in general. There is one 3871 * exception, when PREEMPT_RT and NOHZ is active: 3872 * 3873 * The idle task calls get_next_timer_interrupt() and holds 3874 * the timer wheel base->lock on the CPU and another CPU wants 3875 * to access the timer (probably to cancel it). We can safely 3876 * ignore the boosting request, as the idle CPU runs this code 3877 * with interrupts disabled and will complete the lock 3878 * protected section without being interrupted. So there is no 3879 * real need to boost. 
3880 */ 3881 if (unlikely(p == rq->idle)) { 3882 WARN_ON(p != rq->curr); 3883 WARN_ON(p->pi_blocked_on); 3884 goto out_unlock; 3885 } 3886 3887 trace_sched_pi_setprio(p, prio); 3888 oldprio = p->prio; 3889 prev_class = p->sched_class; 3890 on_rq = p->on_rq; 3891 running = task_current(rq, p); 3892 if (on_rq) 3893 dequeue_task(rq, p, 0); 3894 if (running) 3895 p->sched_class->put_prev_task(rq, p); 3896 3897 if (rt_prio(prio)) 3898 p->sched_class = &rt_sched_class; 3899 else 3900 p->sched_class = &fair_sched_class; 3901 3902 p->prio = prio; 3903 3904 if (running) 3905 p->sched_class->set_curr_task(rq); 3906 if (on_rq) 3907 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0); 3908 3909 check_class_changed(rq, p, prev_class, oldprio); 3910out_unlock: 3911 __task_rq_unlock(rq); 3912} 3913#endif 3914void set_user_nice(struct task_struct *p, long nice) 3915{ 3916 int old_prio, delta, on_rq; 3917 unsigned long flags; 3918 struct rq *rq; 3919 3920 if (TASK_NICE(p) == nice || nice < -20 || nice > 19) 3921 return; 3922 /* 3923 * We have to be careful, if called from sys_setpriority(), 3924 * the task might be in the middle of scheduling on another CPU. 3925 */ 3926 rq = task_rq_lock(p, &flags); 3927 /* 3928 * The RT priorities are set via sched_setscheduler(), but we still 3929 * allow the 'normal' nice value to be set - but as expected 3930 * it wont have any effect on scheduling until the task is 3931 * SCHED_FIFO/SCHED_RR: 3932 */ 3933 if (task_has_rt_policy(p)) { 3934 p->static_prio = NICE_TO_PRIO(nice); 3935 goto out_unlock; 3936 } 3937 on_rq = p->on_rq; 3938 if (on_rq) 3939 dequeue_task(rq, p, 0); 3940 3941 p->static_prio = NICE_TO_PRIO(nice); 3942 set_load_weight(p); 3943 old_prio = p->prio; 3944 p->prio = effective_prio(p); 3945 delta = p->prio - old_prio; 3946 3947 if (on_rq) { 3948 enqueue_task(rq, p, 0); 3949 /* 3950 * If the task increased its priority or is running and 3951 * lowered its priority, then reschedule its CPU: 3952 */ 3953 if (delta < 0 || (delta > 0 && task_running(rq, p))) 3954 resched_task(rq->curr); 3955 } 3956out_unlock: 3957 task_rq_unlock(rq, p, &flags); 3958} 3959EXPORT_SYMBOL(set_user_nice); 3960 3961/* 3962 * can_nice - check if a task can reduce its nice value 3963 * @p: task 3964 * @nice: nice value 3965 */ 3966int can_nice(const struct task_struct *p, const int nice) 3967{ 3968 /* convert nice value [19,-20] to rlimit style value [1,40] */ 3969 int nice_rlim = 20 - nice; 3970 3971 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || 3972 capable(CAP_SYS_NICE)); 3973} 3974 3975#ifdef __ARCH_WANT_SYS_NICE 3976 3977/* 3978 * sys_nice - change the priority of the current process. 3979 * @increment: priority increment 3980 * 3981 * sys_setpriority is a more generic, but much slower function that 3982 * does similar things. 3983 */ 3984SYSCALL_DEFINE1(nice, int, increment) 3985{ 3986 long nice, retval; 3987 3988 /* 3989 * Setpriority might change our priority at the same moment. 3990 * We don't have to worry. Conceptually one call occurs first 3991 * and we have a single winner. 
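 *
 * The increment is clamped to the full nice range [-40, 40] and the
 * resulting nice value to [-20, 19]; lowering the value (raising
 * priority) must additionally pass can_nice(). For example, going from
 * nice 10 to nice -5 requires RLIMIT_NICE >= 25 (can_nice() checks
 * 20 - (-5) = 25 against the rlimit) or CAP_SYS_NICE.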
3992 */ 3993 if (increment < -40) 3994 increment = -40; 3995 if (increment > 40) 3996 increment = 40; 3997 3998 nice = TASK_NICE(current) + increment; 3999 if (nice < -20) 4000 nice = -20; 4001 if (nice > 19) 4002 nice = 19; 4003 4004 if (increment < 0 && !can_nice(current, nice)) 4005 return -EPERM; 4006 4007 retval = security_task_setnice(current, nice); 4008 if (retval) 4009 return retval; 4010 4011 set_user_nice(current, nice); 4012 return 0; 4013} 4014 4015#endif 4016 4017/** 4018 * task_prio - return the priority value of a given task. 4019 * @p: the task in question. 4020 * 4021 * This is the priority value as seen by users in /proc. 4022 * RT tasks are offset by -200. Normal tasks are centered 4023 * around 0, value goes from -16 to +15. 4024 */ 4025int task_prio(const struct task_struct *p) 4026{ 4027 return p->prio - MAX_RT_PRIO; 4028} 4029 4030/** 4031 * task_nice - return the nice value of a given task. 4032 * @p: the task in question. 4033 */ 4034int task_nice(const struct task_struct *p) 4035{ 4036 return TASK_NICE(p); 4037} 4038EXPORT_SYMBOL(task_nice); 4039 4040/** 4041 * idle_cpu - is a given cpu idle currently? 4042 * @cpu: the processor in question. 4043 */ 4044int idle_cpu(int cpu) 4045{ 4046 struct rq *rq = cpu_rq(cpu); 4047 4048 if (rq->curr != rq->idle) 4049 return 0; 4050 4051 if (rq->nr_running) 4052 return 0; 4053 4054#ifdef CONFIG_SMP 4055 if (!llist_empty(&rq->wake_list)) 4056 return 0; 4057#endif 4058 4059 return 1; 4060} 4061 4062/** 4063 * idle_task - return the idle task for a given cpu. 4064 * @cpu: the processor in question. 4065 */ 4066struct task_struct *idle_task(int cpu) 4067{ 4068 return cpu_rq(cpu)->idle; 4069} 4070 4071/** 4072 * find_process_by_pid - find a process with a matching PID value. 4073 * @pid: the pid in question. 4074 */ 4075static struct task_struct *find_process_by_pid(pid_t pid) 4076{ 4077 return pid ? find_task_by_vpid(pid) : current; 4078} 4079 4080/* Actually do priority change: must hold rq lock. 
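 * Sets the policy and rt_priority, recomputes normal_prio and derives
 * the effective prio via rt_mutex_getprio() so that an active PI boost
 * is preserved, then picks the matching sched_class and load weight.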
*/ 4081static void 4082__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) 4083{ 4084 p->policy = policy; 4085 p->rt_priority = prio; 4086 p->normal_prio = normal_prio(p); 4087 /* we are holding p->pi_lock already */ 4088 p->prio = rt_mutex_getprio(p); 4089 if (rt_prio(p->prio)) 4090 p->sched_class = &rt_sched_class; 4091 else 4092 p->sched_class = &fair_sched_class; 4093 set_load_weight(p); 4094} 4095 4096/* 4097 * check the target process has a UID that matches the current process's 4098 */ 4099static bool check_same_owner(struct task_struct *p) 4100{ 4101 const struct cred *cred = current_cred(), *pcred; 4102 bool match; 4103 4104 rcu_read_lock(); 4105 pcred = __task_cred(p); 4106 match = (uid_eq(cred->euid, pcred->euid) || 4107 uid_eq(cred->euid, pcred->uid)); 4108 rcu_read_unlock(); 4109 return match; 4110} 4111 4112static int __sched_setscheduler(struct task_struct *p, int policy, 4113 const struct sched_param *param, bool user) 4114{ 4115 int retval, oldprio, oldpolicy = -1, on_rq, running; 4116 unsigned long flags; 4117 const struct sched_class *prev_class; 4118 struct rq *rq; 4119 int reset_on_fork; 4120 4121 /* may grab non-irq protected spin_locks */ 4122 BUG_ON(in_interrupt()); 4123recheck: 4124 /* double check policy once rq lock held */ 4125 if (policy < 0) { 4126 reset_on_fork = p->sched_reset_on_fork; 4127 policy = oldpolicy = p->policy; 4128 } else { 4129 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK); 4130 policy &= ~SCHED_RESET_ON_FORK; 4131 4132 if (policy != SCHED_FIFO && policy != SCHED_RR && 4133 policy != SCHED_NORMAL && policy != SCHED_BATCH && 4134 policy != SCHED_IDLE) 4135 return -EINVAL; 4136 } 4137 4138 /* 4139 * Valid priorities for SCHED_FIFO and SCHED_RR are 4140 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, 4141 * SCHED_BATCH and SCHED_IDLE is 0. 4142 */ 4143 if (param->sched_priority < 0 || 4144 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) || 4145 (!p->mm && param->sched_priority > MAX_RT_PRIO-1)) 4146 return -EINVAL; 4147 if (rt_policy(policy) != (param->sched_priority != 0)) 4148 return -EINVAL; 4149 4150 /* 4151 * Allow unprivileged RT tasks to decrease priority: 4152 */ 4153 if (user && !capable(CAP_SYS_NICE)) { 4154 if (rt_policy(policy)) { 4155 unsigned long rlim_rtprio = 4156 task_rlimit(p, RLIMIT_RTPRIO); 4157 4158 /* can't set/change the rt policy */ 4159 if (policy != p->policy && !rlim_rtprio) 4160 return -EPERM; 4161 4162 /* can't increase priority */ 4163 if (param->sched_priority > p->rt_priority && 4164 param->sched_priority > rlim_rtprio) 4165 return -EPERM; 4166 } 4167 4168 /* 4169 * Treat SCHED_IDLE as nice 20. Only allow a switch to 4170 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 4171 */ 4172 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) { 4173 if (!can_nice(p, TASK_NICE(p))) 4174 return -EPERM; 4175 } 4176 4177 /* can't change other user's priorities */ 4178 if (!check_same_owner(p)) 4179 return -EPERM; 4180 4181 /* Normal users shall not reset the sched_reset_on_fork flag */ 4182 if (p->sched_reset_on_fork && !reset_on_fork) 4183 return -EPERM; 4184 } 4185 4186 if (user) { 4187 retval = security_task_setscheduler(p); 4188 if (retval) 4189 return retval; 4190 } 4191 4192 /* 4193 * make sure no PI-waiters arrive (or leave) while we are 4194 * changing the priority of the task: 4195 * 4196 * To be able to change p->policy safely, the appropriate 4197 * runqueue lock must be held. 
4198 */ 4199 rq = task_rq_lock(p, &flags); 4200 4201 /* 4202 * Changing the policy of the stop threads its a very bad idea 4203 */ 4204 if (p == rq->stop) { 4205 task_rq_unlock(rq, p, &flags); 4206 return -EINVAL; 4207 } 4208 4209 /* 4210 * If not changing anything there's no need to proceed further: 4211 */ 4212 if (unlikely(policy == p->policy && (!rt_policy(policy) || 4213 param->sched_priority == p->rt_priority))) { 4214 4215 __task_rq_unlock(rq); 4216 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4217 return 0; 4218 } 4219 4220#ifdef CONFIG_RT_GROUP_SCHED 4221 if (user) { 4222 /* 4223 * Do not allow realtime tasks into groups that have no runtime 4224 * assigned. 4225 */ 4226 if (rt_bandwidth_enabled() && rt_policy(policy) && 4227 task_group(p)->rt_bandwidth.rt_runtime == 0 && 4228 !task_group_is_autogroup(task_group(p))) { 4229 task_rq_unlock(rq, p, &flags); 4230 return -EPERM; 4231 } 4232 } 4233#endif 4234 4235 /* recheck policy now with rq lock held */ 4236 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 4237 policy = oldpolicy = -1; 4238 task_rq_unlock(rq, p, &flags); 4239 goto recheck; 4240 } 4241 on_rq = p->on_rq; 4242 running = task_current(rq, p); 4243 if (on_rq) 4244 dequeue_task(rq, p, 0); 4245 if (running) 4246 p->sched_class->put_prev_task(rq, p); 4247 4248 p->sched_reset_on_fork = reset_on_fork; 4249 4250 oldprio = p->prio; 4251 prev_class = p->sched_class; 4252 __setscheduler(rq, p, policy, param->sched_priority); 4253 4254 if (running) 4255 p->sched_class->set_curr_task(rq); 4256 if (on_rq) 4257 enqueue_task(rq, p, 0); 4258 4259 check_class_changed(rq, p, prev_class, oldprio); 4260 task_rq_unlock(rq, p, &flags); 4261 4262 rt_mutex_adjust_pi(p); 4263 4264 return 0; 4265} 4266 4267/** 4268 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 4269 * @p: the task in question. 4270 * @policy: new policy. 4271 * @param: structure containing the new RT priority. 4272 * 4273 * NOTE that the task may be already dead. 4274 */ 4275int sched_setscheduler(struct task_struct *p, int policy, 4276 const struct sched_param *param) 4277{ 4278 return __sched_setscheduler(p, policy, param, true); 4279} 4280EXPORT_SYMBOL_GPL(sched_setscheduler); 4281 4282/** 4283 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 4284 * @p: the task in question. 4285 * @policy: new policy. 4286 * @param: structure containing the new RT priority. 4287 * 4288 * Just like sched_setscheduler, only don't bother checking if the 4289 * current context has permission. For example, this is needed in 4290 * stop_machine(): we create temporary high priority worker threads, 4291 * but our caller might not have that capability. 
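 *
 * A minimal sketch of such an in-kernel caller (the @worker pointer and
 * the chosen priority are illustrative, not taken from this file):
 *
 *	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 *
 *	sched_setscheduler_nocheck(worker, SCHED_FIFO, &param);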
4292 */ 4293int sched_setscheduler_nocheck(struct task_struct *p, int policy, 4294 const struct sched_param *param) 4295{ 4296 return __sched_setscheduler(p, policy, param, false); 4297} 4298 4299static int 4300do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 4301{ 4302 struct sched_param lparam; 4303 struct task_struct *p; 4304 int retval; 4305 4306 if (!param || pid < 0) 4307 return -EINVAL; 4308 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 4309 return -EFAULT; 4310 4311 rcu_read_lock(); 4312 retval = -ESRCH; 4313 p = find_process_by_pid(pid); 4314 if (p != NULL) 4315 retval = sched_setscheduler(p, policy, &lparam); 4316 rcu_read_unlock(); 4317 4318 return retval; 4319} 4320 4321/** 4322 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 4323 * @pid: the pid in question. 4324 * @policy: new policy. 4325 * @param: structure containing the new RT priority. 4326 */ 4327SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, 4328 struct sched_param __user *, param) 4329{ 4330 /* negative values for policy are not valid */ 4331 if (policy < 0) 4332 return -EINVAL; 4333 4334 return do_sched_setscheduler(pid, policy, param); 4335} 4336 4337/** 4338 * sys_sched_setparam - set/change the RT priority of a thread 4339 * @pid: the pid in question. 4340 * @param: structure containing the new RT priority. 4341 */ 4342SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 4343{ 4344 return do_sched_setscheduler(pid, -1, param); 4345} 4346 4347/** 4348 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 4349 * @pid: the pid in question. 4350 */ 4351SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 4352{ 4353 struct task_struct *p; 4354 int retval; 4355 4356 if (pid < 0) 4357 return -EINVAL; 4358 4359 retval = -ESRCH; 4360 rcu_read_lock(); 4361 p = find_process_by_pid(pid); 4362 if (p) { 4363 retval = security_task_getscheduler(p); 4364 if (!retval) 4365 retval = p->policy 4366 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 4367 } 4368 rcu_read_unlock(); 4369 return retval; 4370} 4371 4372/** 4373 * sys_sched_getparam - get the RT priority of a thread 4374 * @pid: the pid in question. 4375 * @param: structure containing the RT priority. 4376 */ 4377SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 4378{ 4379 struct sched_param lp; 4380 struct task_struct *p; 4381 int retval; 4382 4383 if (!param || pid < 0) 4384 return -EINVAL; 4385 4386 rcu_read_lock(); 4387 p = find_process_by_pid(pid); 4388 retval = -ESRCH; 4389 if (!p) 4390 goto out_unlock; 4391 4392 retval = security_task_getscheduler(p); 4393 if (retval) 4394 goto out_unlock; 4395 4396 lp.sched_priority = p->rt_priority; 4397 rcu_read_unlock(); 4398 4399 /* 4400 * This one might sleep, we cannot do it with a spinlock held ... 4401 */ 4402 retval = copy_to_user(param, &lp, sizeof(*param)) ? 
-EFAULT : 0; 4403 4404 return retval; 4405 4406out_unlock: 4407 rcu_read_unlock(); 4408 return retval; 4409} 4410 4411long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 4412{ 4413 cpumask_var_t cpus_allowed, new_mask; 4414 struct task_struct *p; 4415 int retval; 4416 4417 get_online_cpus(); 4418 rcu_read_lock(); 4419 4420 p = find_process_by_pid(pid); 4421 if (!p) { 4422 rcu_read_unlock(); 4423 put_online_cpus(); 4424 return -ESRCH; 4425 } 4426 4427 /* Prevent p going away */ 4428 get_task_struct(p); 4429 rcu_read_unlock(); 4430 4431 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 4432 retval = -ENOMEM; 4433 goto out_put_task; 4434 } 4435 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 4436 retval = -ENOMEM; 4437 goto out_free_cpus_allowed; 4438 } 4439 retval = -EPERM; 4440 if (!check_same_owner(p) && !ns_capable(task_user_ns(p), CAP_SYS_NICE)) 4441 goto out_unlock; 4442 4443 retval = security_task_setscheduler(p); 4444 if (retval) 4445 goto out_unlock; 4446 4447 cpuset_cpus_allowed(p, cpus_allowed); 4448 cpumask_and(new_mask, in_mask, cpus_allowed); 4449again: 4450 retval = set_cpus_allowed_ptr(p, new_mask); 4451 4452 if (!retval) { 4453 cpuset_cpus_allowed(p, cpus_allowed); 4454 if (!cpumask_subset(new_mask, cpus_allowed)) { 4455 /* 4456 * We must have raced with a concurrent cpuset 4457 * update. Just reset the cpus_allowed to the 4458 * cpuset's cpus_allowed 4459 */ 4460 cpumask_copy(new_mask, cpus_allowed); 4461 goto again; 4462 } 4463 } 4464out_unlock: 4465 free_cpumask_var(new_mask); 4466out_free_cpus_allowed: 4467 free_cpumask_var(cpus_allowed); 4468out_put_task: 4469 put_task_struct(p); 4470 put_online_cpus(); 4471 return retval; 4472} 4473 4474static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 4475 struct cpumask *new_mask) 4476{ 4477 if (len < cpumask_size()) 4478 cpumask_clear(new_mask); 4479 else if (len > cpumask_size()) 4480 len = cpumask_size(); 4481 4482 return copy_from_user(new_mask, user_mask_ptr, len) ? 
-EFAULT : 0; 4483} 4484 4485/** 4486 * sys_sched_setaffinity - set the cpu affinity of a process 4487 * @pid: pid of the process 4488 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 4489 * @user_mask_ptr: user-space pointer to the new cpu mask 4490 */ 4491SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 4492 unsigned long __user *, user_mask_ptr) 4493{ 4494 cpumask_var_t new_mask; 4495 int retval; 4496 4497 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 4498 return -ENOMEM; 4499 4500 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 4501 if (retval == 0) 4502 retval = sched_setaffinity(pid, new_mask); 4503 free_cpumask_var(new_mask); 4504 return retval; 4505} 4506 4507long sched_getaffinity(pid_t pid, struct cpumask *mask) 4508{ 4509 struct task_struct *p; 4510 unsigned long flags; 4511 int retval; 4512 4513 get_online_cpus(); 4514 rcu_read_lock(); 4515 4516 retval = -ESRCH; 4517 p = find_process_by_pid(pid); 4518 if (!p) 4519 goto out_unlock; 4520 4521 retval = security_task_getscheduler(p); 4522 if (retval) 4523 goto out_unlock; 4524 4525 raw_spin_lock_irqsave(&p->pi_lock, flags); 4526 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); 4527 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4528 4529out_unlock: 4530 rcu_read_unlock(); 4531 put_online_cpus(); 4532 4533 return retval; 4534} 4535 4536/** 4537 * sys_sched_getaffinity - get the cpu affinity of a process 4538 * @pid: pid of the process 4539 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 4540 * @user_mask_ptr: user-space pointer to hold the current cpu mask 4541 */ 4542SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 4543 unsigned long __user *, user_mask_ptr) 4544{ 4545 int ret; 4546 cpumask_var_t mask; 4547 4548 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 4549 return -EINVAL; 4550 if (len & (sizeof(unsigned long)-1)) 4551 return -EINVAL; 4552 4553 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 4554 return -ENOMEM; 4555 4556 ret = sched_getaffinity(pid, mask); 4557 if (ret == 0) { 4558 size_t retlen = min_t(size_t, len, cpumask_size()); 4559 4560 if (copy_to_user(user_mask_ptr, mask, retlen)) 4561 ret = -EFAULT; 4562 else 4563 ret = retlen; 4564 } 4565 free_cpumask_var(mask); 4566 4567 return ret; 4568} 4569 4570/** 4571 * sys_sched_yield - yield the current processor to other threads. 4572 * 4573 * This function yields the current CPU to other tasks. If there are no 4574 * other threads running on this CPU then this function will return. 
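 *
 * Illustrative user-space usage via the C library wrapper declared in
 * <sched.h> (not part of this file):
 *
 *	#include <sched.h>
 *
 *	sched_yield();		(always returns 0 here)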
4575 */ 4576SYSCALL_DEFINE0(sched_yield) 4577{ 4578 struct rq *rq = this_rq_lock(); 4579 4580 schedstat_inc(rq, yld_count); 4581 current->sched_class->yield_task(rq); 4582 4583 /* 4584 * Since we are going to call schedule() anyway, there's 4585 * no need to preempt or enable interrupts: 4586 */ 4587 __release(rq->lock); 4588 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 4589 do_raw_spin_unlock(&rq->lock); 4590 sched_preempt_enable_no_resched(); 4591 4592 schedule(); 4593 4594 return 0; 4595} 4596 4597static inline int should_resched(void) 4598{ 4599 return need_resched() && !(preempt_count() & PREEMPT_ACTIVE); 4600} 4601 4602static void __cond_resched(void) 4603{ 4604 add_preempt_count(PREEMPT_ACTIVE); 4605 __schedule(); 4606 sub_preempt_count(PREEMPT_ACTIVE); 4607} 4608 4609int __sched _cond_resched(void) 4610{ 4611 if (should_resched()) { 4612 __cond_resched(); 4613 return 1; 4614 } 4615 return 0; 4616} 4617EXPORT_SYMBOL(_cond_resched); 4618 4619/* 4620 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 4621 * call schedule, and on return reacquire the lock. 4622 * 4623 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level 4624 * operations here to prevent schedule() from being called twice (once via 4625 * spin_unlock(), once by hand). 4626 */ 4627int __cond_resched_lock(spinlock_t *lock) 4628{ 4629 int resched = should_resched(); 4630 int ret = 0; 4631 4632 lockdep_assert_held(lock); 4633 4634 if (spin_needbreak(lock) || resched) { 4635 spin_unlock(lock); 4636 if (resched) 4637 __cond_resched(); 4638 else 4639 cpu_relax(); 4640 ret = 1; 4641 spin_lock(lock); 4642 } 4643 return ret; 4644} 4645EXPORT_SYMBOL(__cond_resched_lock); 4646 4647int __sched __cond_resched_softirq(void) 4648{ 4649 BUG_ON(!in_softirq()); 4650 4651 if (should_resched()) { 4652 local_bh_enable(); 4653 __cond_resched(); 4654 local_bh_disable(); 4655 return 1; 4656 } 4657 return 0; 4658} 4659EXPORT_SYMBOL(__cond_resched_softirq); 4660 4661/** 4662 * yield - yield the current processor to other threads. 4663 * 4664 * Do not ever use this function, there's a 99% chance you're doing it wrong. 4665 * 4666 * The scheduler is at all times free to pick the calling task as the most 4667 * eligible task to run, if removing the yield() call from your code breaks 4668 * it, its already broken. 4669 * 4670 * Typical broken usage is: 4671 * 4672 * while (!event) 4673 * yield(); 4674 * 4675 * where one assumes that yield() will let 'the other' process run that will 4676 * make event true. If the current task is a SCHED_FIFO task that will never 4677 * happen. Never use yield() as a progress guarantee!! 4678 * 4679 * If you want to use yield() to wait for something, use wait_event(). 4680 * If you want to use yield() to be 'nice' for others, use cond_resched(). 4681 * If you still want to use yield(), do not! 4682 */ 4683void __sched yield(void) 4684{ 4685 set_current_state(TASK_RUNNING); 4686 sys_sched_yield(); 4687} 4688EXPORT_SYMBOL(yield); 4689 4690/** 4691 * yield_to - yield the current processor to another thread in 4692 * your thread group, or accelerate that thread toward the 4693 * processor it's on. 4694 * @p: target task 4695 * @preempt: whether task preemption is allowed or not 4696 * 4697 * It's the caller's job to ensure that the target task struct 4698 * can't go away on us before we can do any checks. 4699 * 4700 * Returns true if we indeed boosted the target task. 
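 *
 * A typical caller (illustrative, outside this file) is virtualization
 * code that hands the CPU to the vCPU task believed to hold a contended
 * lock:
 *
 *	if (yield_to(vcpu_task, true))
 *		... the target ran and we have already rescheduled ...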
4701 */ 4702bool __sched yield_to(struct task_struct *p, bool preempt) 4703{ 4704 struct task_struct *curr = current; 4705 struct rq *rq, *p_rq; 4706 unsigned long flags; 4707 bool yielded = 0; 4708 4709 local_irq_save(flags); 4710 rq = this_rq(); 4711 4712again: 4713 p_rq = task_rq(p); 4714 double_rq_lock(rq, p_rq); 4715 while (task_rq(p) != p_rq) { 4716 double_rq_unlock(rq, p_rq); 4717 goto again; 4718 } 4719 4720 if (!curr->sched_class->yield_to_task) 4721 goto out; 4722 4723 if (curr->sched_class != p->sched_class) 4724 goto out; 4725 4726 if (task_running(p_rq, p) || p->state) 4727 goto out; 4728 4729 yielded = curr->sched_class->yield_to_task(rq, p, preempt); 4730 if (yielded) { 4731 schedstat_inc(rq, yld_count); 4732 /* 4733 * Make p's CPU reschedule; pick_next_entity takes care of 4734 * fairness. 4735 */ 4736 if (preempt && rq != p_rq) 4737 resched_task(p_rq->curr); 4738 } else { 4739 /* 4740 * We might have set it in task_yield_fair(), but are 4741 * not going to schedule(), so don't want to skip 4742 * the next update. 4743 */ 4744 rq->skip_clock_update = 0; 4745 } 4746 4747out: 4748 double_rq_unlock(rq, p_rq); 4749 local_irq_restore(flags); 4750 4751 if (yielded) 4752 schedule(); 4753 4754 return yielded; 4755} 4756EXPORT_SYMBOL_GPL(yield_to); 4757 4758/* 4759 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 4760 * that process accounting knows that this is a task in IO wait state. 4761 */ 4762void __sched io_schedule(void) 4763{ 4764 struct rq *rq = raw_rq(); 4765 4766 delayacct_blkio_start(); 4767 atomic_inc(&rq->nr_iowait); 4768 blk_flush_plug(current); 4769 current->in_iowait = 1; 4770 schedule(); 4771 current->in_iowait = 0; 4772 atomic_dec(&rq->nr_iowait); 4773 delayacct_blkio_end(); 4774} 4775EXPORT_SYMBOL(io_schedule); 4776 4777long __sched io_schedule_timeout(long timeout) 4778{ 4779 struct rq *rq = raw_rq(); 4780 long ret; 4781 4782 delayacct_blkio_start(); 4783 atomic_inc(&rq->nr_iowait); 4784 blk_flush_plug(current); 4785 current->in_iowait = 1; 4786 ret = schedule_timeout(timeout); 4787 current->in_iowait = 0; 4788 atomic_dec(&rq->nr_iowait); 4789 delayacct_blkio_end(); 4790 return ret; 4791} 4792 4793/** 4794 * sys_sched_get_priority_max - return maximum RT priority. 4795 * @policy: scheduling class. 4796 * 4797 * this syscall returns the maximum rt_priority that can be used 4798 * by a given scheduling class. 4799 */ 4800SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 4801{ 4802 int ret = -EINVAL; 4803 4804 switch (policy) { 4805 case SCHED_FIFO: 4806 case SCHED_RR: 4807 ret = MAX_USER_RT_PRIO-1; 4808 break; 4809 case SCHED_NORMAL: 4810 case SCHED_BATCH: 4811 case SCHED_IDLE: 4812 ret = 0; 4813 break; 4814 } 4815 return ret; 4816} 4817 4818/** 4819 * sys_sched_get_priority_min - return minimum RT priority. 4820 * @policy: scheduling class. 4821 * 4822 * this syscall returns the minimum rt_priority that can be used 4823 * by a given scheduling class. 4824 */ 4825SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 4826{ 4827 int ret = -EINVAL; 4828 4829 switch (policy) { 4830 case SCHED_FIFO: 4831 case SCHED_RR: 4832 ret = 1; 4833 break; 4834 case SCHED_NORMAL: 4835 case SCHED_BATCH: 4836 case SCHED_IDLE: 4837 ret = 0; 4838 } 4839 return ret; 4840} 4841 4842/** 4843 * sys_sched_rr_get_interval - return the default timeslice of a process. 4844 * @pid: pid of the process. 4845 * @interval: userspace pointer to the timeslice value. 
4846 * 4847 * this syscall writes the default timeslice value of a given process 4848 * into the user-space timespec buffer. A value of '0' means infinity. 4849 */ 4850SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 4851 struct timespec __user *, interval) 4852{ 4853 struct task_struct *p; 4854 unsigned int time_slice; 4855 unsigned long flags; 4856 struct rq *rq; 4857 int retval; 4858 struct timespec t; 4859 4860 if (pid < 0) 4861 return -EINVAL; 4862 4863 retval = -ESRCH; 4864 rcu_read_lock(); 4865 p = find_process_by_pid(pid); 4866 if (!p) 4867 goto out_unlock; 4868 4869 retval = security_task_getscheduler(p); 4870 if (retval) 4871 goto out_unlock; 4872 4873 rq = task_rq_lock(p, &flags); 4874 time_slice = p->sched_class->get_rr_interval(rq, p); 4875 task_rq_unlock(rq, p, &flags); 4876 4877 rcu_read_unlock(); 4878 jiffies_to_timespec(time_slice, &t); 4879 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; 4880 return retval; 4881 4882out_unlock: 4883 rcu_read_unlock(); 4884 return retval; 4885} 4886 4887static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; 4888 4889void sched_show_task(struct task_struct *p) 4890{ 4891 unsigned long free = 0; 4892 unsigned state; 4893 4894 state = p->state ? __ffs(p->state) + 1 : 0; 4895 printk(KERN_INFO "%-15.15s %c", p->comm, 4896 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); 4897#if BITS_PER_LONG == 32 4898 if (state == TASK_RUNNING) 4899 printk(KERN_CONT " running "); 4900 else 4901 printk(KERN_CONT " %08lx ", thread_saved_pc(p)); 4902#else 4903 if (state == TASK_RUNNING) 4904 printk(KERN_CONT " running task "); 4905 else 4906 printk(KERN_CONT " %016lx ", thread_saved_pc(p)); 4907#endif 4908#ifdef CONFIG_DEBUG_STACK_USAGE 4909 free = stack_not_used(p); 4910#endif 4911 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, 4912 task_pid_nr(p), task_pid_nr(rcu_dereference(p->real_parent)), 4913 (unsigned long)task_thread_info(p)->flags); 4914 4915 show_stack(p, NULL); 4916} 4917 4918void show_state_filter(unsigned long state_filter) 4919{ 4920 struct task_struct *g, *p; 4921 4922#if BITS_PER_LONG == 32 4923 printk(KERN_INFO 4924 " task PC stack pid father\n"); 4925#else 4926 printk(KERN_INFO 4927 " task PC stack pid father\n"); 4928#endif 4929 rcu_read_lock(); 4930 do_each_thread(g, p) { 4931 /* 4932 * reset the NMI-timeout, listing all files on a slow 4933 * console might take a lot of time: 4934 */ 4935 touch_nmi_watchdog(); 4936 if (!state_filter || (p->state & state_filter)) 4937 sched_show_task(p); 4938 } while_each_thread(g, p); 4939 4940 touch_all_softlockup_watchdogs(); 4941 4942#ifdef CONFIG_SCHED_DEBUG 4943 sysrq_sched_debug_show(); 4944#endif 4945 rcu_read_unlock(); 4946 /* 4947 * Only show locks if all tasks are dumped: 4948 */ 4949 if (!state_filter) 4950 debug_show_all_locks(); 4951} 4952 4953void __cpuinit init_idle_bootup_task(struct task_struct *idle) 4954{ 4955 idle->sched_class = &idle_sched_class; 4956} 4957 4958/** 4959 * init_idle - set up an idle thread for a given CPU 4960 * @idle: task in question 4961 * @cpu: cpu the idle task belongs to 4962 * 4963 * NOTE: this function does not set the idle thread's NEED_RESCHED 4964 * flag, to make booting more robust. 
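 *
 * Typical (illustrative) call path: CPU bring-up forks the idle task
 * with fork_idle(), which then calls init_idle() for the target cpu.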
4965 */ 4966void __cpuinit init_idle(struct task_struct *idle, int cpu) 4967{ 4968 struct rq *rq = cpu_rq(cpu); 4969 unsigned long flags; 4970 4971 raw_spin_lock_irqsave(&rq->lock, flags); 4972 4973 __sched_fork(idle); 4974 idle->state = TASK_RUNNING; 4975 idle->se.exec_start = sched_clock(); 4976 4977 do_set_cpus_allowed(idle, cpumask_of(cpu)); 4978 /* 4979 * We're having a chicken and egg problem, even though we are 4980 * holding rq->lock, the cpu isn't yet set to this cpu so the 4981 * lockdep check in task_group() will fail. 4982 * 4983 * Similar case to sched_fork(). / Alternatively we could 4984 * use task_rq_lock() here and obtain the other rq->lock. 4985 * 4986 * Silence PROVE_RCU 4987 */ 4988 rcu_read_lock(); 4989 __set_task_cpu(idle, cpu); 4990 rcu_read_unlock(); 4991 4992 rq->curr = rq->idle = idle; 4993#if defined(CONFIG_SMP) 4994 idle->on_cpu = 1; 4995#endif 4996 raw_spin_unlock_irqrestore(&rq->lock, flags); 4997 4998 /* Set the preempt count _outside_ the spinlocks! */ 4999 task_thread_info(idle)->preempt_count = 0; 5000 5001 /* 5002 * The idle tasks have their own, simple scheduling class: 5003 */ 5004 idle->sched_class = &idle_sched_class; 5005 ftrace_graph_init_idle_task(idle, cpu); 5006#if defined(CONFIG_SMP) 5007 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 5008#endif 5009} 5010 5011#ifdef CONFIG_SMP 5012void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 5013{ 5014 if (p->sched_class && p->sched_class->set_cpus_allowed) 5015 p->sched_class->set_cpus_allowed(p, new_mask); 5016 5017 cpumask_copy(&p->cpus_allowed, new_mask); 5018 p->nr_cpus_allowed = cpumask_weight(new_mask); 5019} 5020 5021/* 5022 * This is how migration works: 5023 * 5024 * 1) we invoke migration_cpu_stop() on the target CPU using 5025 * stop_one_cpu(). 5026 * 2) stopper starts to run (implicitly forcing the migrated thread 5027 * off the CPU) 5028 * 3) it checks whether the migrated task is still in the wrong runqueue. 5029 * 4) if it's in the wrong runqueue then the migration thread removes 5030 * it and puts it into the right queue. 5031 * 5) stopper completes and stop_one_cpu() returns and the migration 5032 * is done. 5033 */ 5034 5035/* 5036 * Change a given task's CPU affinity. Migrate the thread to a 5037 * proper CPU and schedule it away if the CPU it's executing on 5038 * is removed from the allowed bitmask. 5039 * 5040 * NOTE: the caller must have a valid reference to the task, the 5041 * task must not exit() & deallocate itself prematurely. The 5042 * call is not atomic; no spinlocks may be held. 5043 */ 5044int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 5045{ 5046 unsigned long flags; 5047 struct rq *rq; 5048 unsigned int dest_cpu; 5049 int ret = 0; 5050 5051 rq = task_rq_lock(p, &flags); 5052 5053 if (cpumask_equal(&p->cpus_allowed, new_mask)) 5054 goto out; 5055 5056 if (!cpumask_intersects(new_mask, cpu_active_mask)) { 5057 ret = -EINVAL; 5058 goto out; 5059 } 5060 5061 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) { 5062 ret = -EINVAL; 5063 goto out; 5064 } 5065 5066 do_set_cpus_allowed(p, new_mask); 5067 5068 /* Can the task run on the task's current CPU? If so, we're done */ 5069 if (cpumask_test_cpu(task_cpu(p), new_mask)) 5070 goto out; 5071 5072 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); 5073 if (p->on_rq) { 5074 struct migration_arg arg = { p, dest_cpu }; 5075 /* Need help from migration thread: drop lock and wait. 
*/ 5076 task_rq_unlock(rq, p, &flags); 5077 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); 5078 tlb_migrate_finish(p->mm); 5079 return 0; 5080 } 5081out: 5082 task_rq_unlock(rq, p, &flags); 5083 5084 return ret; 5085} 5086EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); 5087 5088/* 5089 * Move (not current) task off this cpu, onto dest cpu. We're doing 5090 * this because either it can't run here any more (set_cpus_allowed() 5091 * away from this CPU, or CPU going down), or because we're 5092 * attempting to rebalance this task on exec (sched_exec). 5093 * 5094 * So we race with normal scheduler movements, but that's OK, as long 5095 * as the task is no longer on this CPU. 5096 * 5097 * Returns non-zero if task was successfully migrated. 5098 */ 5099static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) 5100{ 5101 struct rq *rq_dest, *rq_src; 5102 int ret = 0; 5103 5104 if (unlikely(!cpu_active(dest_cpu))) 5105 return ret; 5106 5107 rq_src = cpu_rq(src_cpu); 5108 rq_dest = cpu_rq(dest_cpu); 5109 5110 raw_spin_lock(&p->pi_lock); 5111 double_rq_lock(rq_src, rq_dest); 5112 /* Already moved. */ 5113 if (task_cpu(p) != src_cpu) 5114 goto done; 5115 /* Affinity changed (again). */ 5116 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) 5117 goto fail; 5118 5119 /* 5120 * If we're not on a rq, the next wake-up will ensure we're 5121 * placed properly. 5122 */ 5123 if (p->on_rq) { 5124 dequeue_task(rq_src, p, 0); 5125 set_task_cpu(p, dest_cpu); 5126 enqueue_task(rq_dest, p, 0); 5127 check_preempt_curr(rq_dest, p, 0); 5128 } 5129done: 5130 ret = 1; 5131fail: 5132 double_rq_unlock(rq_src, rq_dest); 5133 raw_spin_unlock(&p->pi_lock); 5134 return ret; 5135} 5136 5137/* 5138 * migration_cpu_stop - this will be executed by a highprio stopper thread 5139 * and performs thread migration by bumping thread off CPU then 5140 * 'pushing' onto another runqueue. 5141 */ 5142static int migration_cpu_stop(void *data) 5143{ 5144 struct migration_arg *arg = data; 5145 5146 /* 5147 * The original target cpu might have gone down and we might 5148 * be on another cpu but it doesn't matter. 5149 */ 5150 local_irq_disable(); 5151 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu); 5152 local_irq_enable(); 5153 return 0; 5154} 5155 5156#ifdef CONFIG_HOTPLUG_CPU 5157 5158/* 5159 * Ensures that the idle task is using init_mm right before its cpu goes 5160 * offline. 5161 */ 5162void idle_task_exit(void) 5163{ 5164 struct mm_struct *mm = current->active_mm; 5165 5166 BUG_ON(cpu_online(smp_processor_id())); 5167 5168 if (mm != &init_mm) 5169 switch_mm(mm, &init_mm, current); 5170 mmdrop(mm); 5171} 5172 5173/* 5174 * While a dead CPU has no uninterruptible tasks queued at this point, 5175 * it might still have a nonzero ->nr_uninterruptible counter, because 5176 * for performance reasons the counter is not stricly tracking tasks to 5177 * their home CPUs. So we just add the counter to another CPU's counter, 5178 * to keep the global sum constant after CPU-down: 5179 */ 5180static void migrate_nr_uninterruptible(struct rq *rq_src) 5181{ 5182 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask)); 5183 5184 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible; 5185 rq_src->nr_uninterruptible = 0; 5186} 5187 5188/* 5189 * remove the tasks which were accounted by rq from calc_load_tasks. 
5190 */ 5191static void calc_global_load_remove(struct rq *rq) 5192{ 5193 atomic_long_sub(rq->calc_load_active, &calc_load_tasks); 5194 rq->calc_load_active = 0; 5195} 5196 5197/* 5198 * Migrate all tasks from the rq, sleeping tasks will be migrated by 5199 * try_to_wake_up()->select_task_rq(). 5200 * 5201 * Called with rq->lock held even though we'er in stop_machine() and 5202 * there's no concurrency possible, we hold the required locks anyway 5203 * because of lock validation efforts. 5204 */ 5205static void migrate_tasks(unsigned int dead_cpu) 5206{ 5207 struct rq *rq = cpu_rq(dead_cpu); 5208 struct task_struct *next, *stop = rq->stop; 5209 int dest_cpu; 5210 5211 /* 5212 * Fudge the rq selection such that the below task selection loop 5213 * doesn't get stuck on the currently eligible stop task. 5214 * 5215 * We're currently inside stop_machine() and the rq is either stuck 5216 * in the stop_machine_cpu_stop() loop, or we're executing this code, 5217 * either way we should never end up calling schedule() until we're 5218 * done here. 5219 */ 5220 rq->stop = NULL; 5221 5222 /* Ensure any throttled groups are reachable by pick_next_task */ 5223 unthrottle_offline_cfs_rqs(rq); 5224 5225 for ( ; ; ) { 5226 /* 5227 * There's this thread running, bail when that's the only 5228 * remaining thread. 5229 */ 5230 if (rq->nr_running == 1) 5231 break; 5232 5233 next = pick_next_task(rq); 5234 BUG_ON(!next); 5235 next->sched_class->put_prev_task(rq, next); 5236 5237 /* Find suitable destination for @next, with force if needed. */ 5238 dest_cpu = select_fallback_rq(dead_cpu, next); 5239 raw_spin_unlock(&rq->lock); 5240 5241 __migrate_task(next, dead_cpu, dest_cpu); 5242 5243 raw_spin_lock(&rq->lock); 5244 } 5245 5246 rq->stop = stop; 5247} 5248 5249#endif /* CONFIG_HOTPLUG_CPU */ 5250 5251#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) 5252 5253static struct ctl_table sd_ctl_dir[] = { 5254 { 5255 .procname = "sched_domain", 5256 .mode = 0555, 5257 }, 5258 {} 5259}; 5260 5261static struct ctl_table sd_ctl_root[] = { 5262 { 5263 .procname = "kernel", 5264 .mode = 0555, 5265 .child = sd_ctl_dir, 5266 }, 5267 {} 5268}; 5269 5270static struct ctl_table *sd_alloc_ctl_entry(int n) 5271{ 5272 struct ctl_table *entry = 5273 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); 5274 5275 return entry; 5276} 5277 5278static void sd_free_ctl_entry(struct ctl_table **tablep) 5279{ 5280 struct ctl_table *entry; 5281 5282 /* 5283 * In the intermediate directories, both the child directory and 5284 * procname are dynamically allocated and could fail but the mode 5285 * will always be set. In the lowest directory the names are 5286 * static strings and all have proc handlers. 
5287 */ 5288 for (entry = *tablep; entry->mode; entry++) { 5289 if (entry->child) 5290 sd_free_ctl_entry(&entry->child); 5291 if (entry->proc_handler == NULL) 5292 kfree(entry->procname); 5293 } 5294 5295 kfree(*tablep); 5296 *tablep = NULL; 5297} 5298 5299static void 5300set_table_entry(struct ctl_table *entry, 5301 const char *procname, void *data, int maxlen, 5302 umode_t mode, proc_handler *proc_handler) 5303{ 5304 entry->procname = procname; 5305 entry->data = data; 5306 entry->maxlen = maxlen; 5307 entry->mode = mode; 5308 entry->proc_handler = proc_handler; 5309} 5310 5311static struct ctl_table * 5312sd_alloc_ctl_domain_table(struct sched_domain *sd) 5313{ 5314 struct ctl_table *table = sd_alloc_ctl_entry(13); 5315 5316 if (table == NULL) 5317 return NULL; 5318 5319 set_table_entry(&table[0], "min_interval", &sd->min_interval, 5320 sizeof(long), 0644, proc_doulongvec_minmax); 5321 set_table_entry(&table[1], "max_interval", &sd->max_interval, 5322 sizeof(long), 0644, proc_doulongvec_minmax); 5323 set_table_entry(&table[2], "busy_idx", &sd->busy_idx, 5324 sizeof(int), 0644, proc_dointvec_minmax); 5325 set_table_entry(&table[3], "idle_idx", &sd->idle_idx, 5326 sizeof(int), 0644, proc_dointvec_minmax); 5327 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, 5328 sizeof(int), 0644, proc_dointvec_minmax); 5329 set_table_entry(&table[5], "wake_idx", &sd->wake_idx, 5330 sizeof(int), 0644, proc_dointvec_minmax); 5331 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, 5332 sizeof(int), 0644, proc_dointvec_minmax); 5333 set_table_entry(&table[7], "busy_factor", &sd->busy_factor, 5334 sizeof(int), 0644, proc_dointvec_minmax); 5335 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, 5336 sizeof(int), 0644, proc_dointvec_minmax); 5337 set_table_entry(&table[9], "cache_nice_tries", 5338 &sd->cache_nice_tries, 5339 sizeof(int), 0644, proc_dointvec_minmax); 5340 set_table_entry(&table[10], "flags", &sd->flags, 5341 sizeof(int), 0644, proc_dointvec_minmax); 5342 set_table_entry(&table[11], "name", sd->name, 5343 CORENAME_MAX_SIZE, 0444, proc_dostring); 5344 /* &table[12] is terminator */ 5345 5346 return table; 5347} 5348 5349static ctl_table *sd_alloc_ctl_cpu_table(int cpu) 5350{ 5351 struct ctl_table *entry, *table; 5352 struct sched_domain *sd; 5353 int domain_num = 0, i; 5354 char buf[32]; 5355 5356 for_each_domain(cpu, sd) 5357 domain_num++; 5358 entry = table = sd_alloc_ctl_entry(domain_num + 1); 5359 if (table == NULL) 5360 return NULL; 5361 5362 i = 0; 5363 for_each_domain(cpu, sd) { 5364 snprintf(buf, 32, "domain%d", i); 5365 entry->procname = kstrdup(buf, GFP_KERNEL); 5366 entry->mode = 0555; 5367 entry->child = sd_alloc_ctl_domain_table(sd); 5368 entry++; 5369 i++; 5370 } 5371 return table; 5372} 5373 5374static struct ctl_table_header *sd_sysctl_header; 5375static void register_sched_domain_sysctl(void) 5376{ 5377 int i, cpu_num = num_possible_cpus(); 5378 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); 5379 char buf[32]; 5380 5381 WARN_ON(sd_ctl_dir[0].child); 5382 sd_ctl_dir[0].child = entry; 5383 5384 if (entry == NULL) 5385 return; 5386 5387 for_each_possible_cpu(i) { 5388 snprintf(buf, 32, "cpu%d", i); 5389 entry->procname = kstrdup(buf, GFP_KERNEL); 5390 entry->mode = 0555; 5391 entry->child = sd_alloc_ctl_cpu_table(i); 5392 entry++; 5393 } 5394 5395 WARN_ON(sd_sysctl_header); 5396 sd_sysctl_header = register_sysctl_table(sd_ctl_root); 5397} 5398 5399/* may be called multiple times per register */ 5400static void 
unregister_sched_domain_sysctl(void) 5401{ 5402 if (sd_sysctl_header) 5403 unregister_sysctl_table(sd_sysctl_header); 5404 sd_sysctl_header = NULL; 5405 if (sd_ctl_dir[0].child) 5406 sd_free_ctl_entry(&sd_ctl_dir[0].child); 5407} 5408#else 5409static void register_sched_domain_sysctl(void) 5410{ 5411} 5412static void unregister_sched_domain_sysctl(void) 5413{ 5414} 5415#endif 5416 5417static void set_rq_online(struct rq *rq) 5418{ 5419 if (!rq->online) { 5420 const struct sched_class *class; 5421 5422 cpumask_set_cpu(rq->cpu, rq->rd->online); 5423 rq->online = 1; 5424 5425 for_each_class(class) { 5426 if (class->rq_online) 5427 class->rq_online(rq); 5428 } 5429 } 5430} 5431 5432static void set_rq_offline(struct rq *rq) 5433{ 5434 if (rq->online) { 5435 const struct sched_class *class; 5436 5437 for_each_class(class) { 5438 if (class->rq_offline) 5439 class->rq_offline(rq); 5440 } 5441 5442 cpumask_clear_cpu(rq->cpu, rq->rd->online); 5443 rq->online = 0; 5444 } 5445} 5446 5447/* 5448 * migration_call - callback that gets triggered when a CPU is added. 5449 * Here we can start up the necessary migration thread for the new CPU. 5450 */ 5451static int __cpuinit 5452migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) 5453{ 5454 int cpu = (long)hcpu; 5455 unsigned long flags; 5456 struct rq *rq = cpu_rq(cpu); 5457 5458 switch (action & ~CPU_TASKS_FROZEN) { 5459 5460 case CPU_UP_PREPARE: 5461 rq->calc_load_update = calc_load_update; 5462 break; 5463 5464 case CPU_ONLINE: 5465 /* Update our root-domain */ 5466 raw_spin_lock_irqsave(&rq->lock, flags); 5467 if (rq->rd) { 5468 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5469 5470 set_rq_online(rq); 5471 } 5472 raw_spin_unlock_irqrestore(&rq->lock, flags); 5473 break; 5474 5475#ifdef CONFIG_HOTPLUG_CPU 5476 case CPU_DYING: 5477 sched_ttwu_pending(); 5478 /* Update our root-domain */ 5479 raw_spin_lock_irqsave(&rq->lock, flags); 5480 if (rq->rd) { 5481 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5482 set_rq_offline(rq); 5483 } 5484 migrate_tasks(cpu); 5485 BUG_ON(rq->nr_running != 1); /* the migration thread */ 5486 raw_spin_unlock_irqrestore(&rq->lock, flags); 5487 5488 migrate_nr_uninterruptible(rq); 5489 calc_global_load_remove(rq); 5490 break; 5491#endif 5492 } 5493 5494 update_max_interval(); 5495 5496 return NOTIFY_OK; 5497} 5498 5499/* 5500 * Register at high priority so that task migration (migrate_all_tasks) 5501 * happens before everything else. This has to be lower priority than 5502 * the notifier in the perf_event subsystem, though. 
5503 */ 5504static struct notifier_block __cpuinitdata migration_notifier = { 5505 .notifier_call = migration_call, 5506 .priority = CPU_PRI_MIGRATION, 5507}; 5508 5509static int __cpuinit sched_cpu_active(struct notifier_block *nfb, 5510 unsigned long action, void *hcpu) 5511{ 5512 switch (action & ~CPU_TASKS_FROZEN) { 5513 case CPU_STARTING: 5514 case CPU_DOWN_FAILED: 5515 set_cpu_active((long)hcpu, true); 5516 return NOTIFY_OK; 5517 default: 5518 return NOTIFY_DONE; 5519 } 5520} 5521 5522static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb, 5523 unsigned long action, void *hcpu) 5524{ 5525 switch (action & ~CPU_TASKS_FROZEN) { 5526 case CPU_DOWN_PREPARE: 5527 set_cpu_active((long)hcpu, false); 5528 return NOTIFY_OK; 5529 default: 5530 return NOTIFY_DONE; 5531 } 5532} 5533 5534static int __init migration_init(void) 5535{ 5536 void *cpu = (void *)(long)smp_processor_id(); 5537 int err; 5538 5539 /* Initialize migration for the boot CPU */ 5540 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); 5541 BUG_ON(err == NOTIFY_BAD); 5542 migration_call(&migration_notifier, CPU_ONLINE, cpu); 5543 register_cpu_notifier(&migration_notifier); 5544 5545 /* Register cpu active notifiers */ 5546 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); 5547 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); 5548 5549 return 0; 5550} 5551early_initcall(migration_init); 5552#endif 5553 5554#ifdef CONFIG_SMP 5555 5556static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ 5557 5558#ifdef CONFIG_SCHED_DEBUG 5559 5560static __read_mostly int sched_domain_debug_enabled; 5561 5562static int __init sched_domain_debug_setup(char *str) 5563{ 5564 sched_domain_debug_enabled = 1; 5565 5566 return 0; 5567} 5568early_param("sched_debug", sched_domain_debug_setup); 5569 5570static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 5571 struct cpumask *groupmask) 5572{ 5573 struct sched_group *group = sd->groups; 5574 char str[256]; 5575 5576 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd)); 5577 cpumask_clear(groupmask); 5578 5579 printk(KERN_DEBUG "%*s domain %d: ", level, "", level); 5580 5581 if (!(sd->flags & SD_LOAD_BALANCE)) { 5582 printk("does not load-balance\n"); 5583 if (sd->parent) 5584 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" 5585 " has parent"); 5586 return -1; 5587 } 5588 5589 printk(KERN_CONT "span %s level %s\n", str, sd->name); 5590 5591 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { 5592 printk(KERN_ERR "ERROR: domain->span does not contain " 5593 "CPU%d\n", cpu); 5594 } 5595 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { 5596 printk(KERN_ERR "ERROR: domain->groups does not contain" 5597 " CPU%d\n", cpu); 5598 } 5599 5600 printk(KERN_DEBUG "%*s groups:", level + 1, ""); 5601 do { 5602 if (!group) { 5603 printk("\n"); 5604 printk(KERN_ERR "ERROR: group is NULL\n"); 5605 break; 5606 } 5607 5608 if (!group->sgp->power) { 5609 printk(KERN_CONT "\n"); 5610 printk(KERN_ERR "ERROR: domain->cpu_power not " 5611 "set\n"); 5612 break; 5613 } 5614 5615 if (!cpumask_weight(sched_group_cpus(group))) { 5616 printk(KERN_CONT "\n"); 5617 printk(KERN_ERR "ERROR: empty group\n"); 5618 break; 5619 } 5620 5621 if (!(sd->flags & SD_OVERLAP) && 5622 cpumask_intersects(groupmask, sched_group_cpus(group))) { 5623 printk(KERN_CONT "\n"); 5624 printk(KERN_ERR "ERROR: repeated CPUs\n"); 5625 break; 5626 } 5627 5628 cpumask_or(groupmask, groupmask, sched_group_cpus(group)); 5629 5630 cpulist_scnprintf(str, sizeof(str), 
sched_group_cpus(group)); 5631 5632 printk(KERN_CONT " %s", str); 5633 if (group->sgp->power != SCHED_POWER_SCALE) { 5634 printk(KERN_CONT " (cpu_power = %d)", 5635 group->sgp->power); 5636 } 5637 5638 group = group->next; 5639 } while (group != sd->groups); 5640 printk(KERN_CONT "\n"); 5641 5642 if (!cpumask_equal(sched_domain_span(sd), groupmask)) 5643 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); 5644 5645 if (sd->parent && 5646 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) 5647 printk(KERN_ERR "ERROR: parent span is not a superset " 5648 "of domain->span\n"); 5649 return 0; 5650} 5651 5652static void sched_domain_debug(struct sched_domain *sd, int cpu) 5653{ 5654 int level = 0; 5655 5656 if (!sched_domain_debug_enabled) 5657 return; 5658 5659 if (!sd) { 5660 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); 5661 return; 5662 } 5663 5664 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); 5665 5666 for (;;) { 5667 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) 5668 break; 5669 level++; 5670 sd = sd->parent; 5671 if (!sd) 5672 break; 5673 } 5674} 5675#else /* !CONFIG_SCHED_DEBUG */ 5676# define sched_domain_debug(sd, cpu) do { } while (0) 5677#endif /* CONFIG_SCHED_DEBUG */ 5678 5679static int sd_degenerate(struct sched_domain *sd) 5680{ 5681 if (cpumask_weight(sched_domain_span(sd)) == 1) 5682 return 1; 5683 5684 /* Following flags need at least 2 groups */ 5685 if (sd->flags & (SD_LOAD_BALANCE | 5686 SD_BALANCE_NEWIDLE | 5687 SD_BALANCE_FORK | 5688 SD_BALANCE_EXEC | 5689 SD_SHARE_CPUPOWER | 5690 SD_SHARE_PKG_RESOURCES)) { 5691 if (sd->groups != sd->groups->next) 5692 return 0; 5693 } 5694 5695 /* Following flags don't use groups */ 5696 if (sd->flags & (SD_WAKE_AFFINE)) 5697 return 0; 5698 5699 return 1; 5700} 5701 5702static int 5703sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) 5704{ 5705 unsigned long cflags = sd->flags, pflags = parent->flags; 5706 5707 if (sd_degenerate(parent)) 5708 return 1; 5709 5710 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) 5711 return 0; 5712 5713 /* Flags needing groups don't count if only 1 group in parent */ 5714 if (parent->groups == parent->groups->next) { 5715 pflags &= ~(SD_LOAD_BALANCE | 5716 SD_BALANCE_NEWIDLE | 5717 SD_BALANCE_FORK | 5718 SD_BALANCE_EXEC | 5719 SD_SHARE_CPUPOWER | 5720 SD_SHARE_PKG_RESOURCES); 5721 if (nr_node_ids == 1) 5722 pflags &= ~SD_SERIALIZE; 5723 } 5724 if (~cflags & pflags) 5725 return 0; 5726 5727 return 1; 5728} 5729 5730static void free_rootdomain(struct rcu_head *rcu) 5731{ 5732 struct root_domain *rd = container_of(rcu, struct root_domain, rcu); 5733 5734 cpupri_cleanup(&rd->cpupri); 5735 free_cpumask_var(rd->rto_mask); 5736 free_cpumask_var(rd->online); 5737 free_cpumask_var(rd->span); 5738 kfree(rd); 5739} 5740 5741static void rq_attach_root(struct rq *rq, struct root_domain *rd) 5742{ 5743 struct root_domain *old_rd = NULL; 5744 unsigned long flags; 5745 5746 raw_spin_lock_irqsave(&rq->lock, flags); 5747 5748 if (rq->rd) { 5749 old_rd = rq->rd; 5750 5751 if (cpumask_test_cpu(rq->cpu, old_rd->online)) 5752 set_rq_offline(rq); 5753 5754 cpumask_clear_cpu(rq->cpu, old_rd->span); 5755 5756 /* 5757 * If we dont want to free the old_rt yet then 5758 * set old_rd to NULL to skip the freeing later 5759 * in this function: 5760 */ 5761 if (!atomic_dec_and_test(&old_rd->refcount)) 5762 old_rd = NULL; 5763 } 5764 5765 atomic_inc(&rd->refcount); 5766 rq->rd = rd; 5767 5768 cpumask_set_cpu(rq->cpu, 
rd->span); 5769 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) 5770 set_rq_online(rq); 5771 5772 raw_spin_unlock_irqrestore(&rq->lock, flags); 5773 5774 if (old_rd) 5775 call_rcu_sched(&old_rd->rcu, free_rootdomain); 5776} 5777 5778static int init_rootdomain(struct root_domain *rd) 5779{ 5780 memset(rd, 0, sizeof(*rd)); 5781 5782 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) 5783 goto out; 5784 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) 5785 goto free_span; 5786 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) 5787 goto free_online; 5788 5789 if (cpupri_init(&rd->cpupri) != 0) 5790 goto free_rto_mask; 5791 return 0; 5792 5793free_rto_mask: 5794 free_cpumask_var(rd->rto_mask); 5795free_online: 5796 free_cpumask_var(rd->online); 5797free_span: 5798 free_cpumask_var(rd->span); 5799out: 5800 return -ENOMEM; 5801} 5802 5803/* 5804 * By default the system creates a single root-domain with all cpus as 5805 * members (mimicking the global state we have today). 5806 */ 5807struct root_domain def_root_domain; 5808 5809static void init_defrootdomain(void) 5810{ 5811 init_rootdomain(&def_root_domain); 5812 5813 atomic_set(&def_root_domain.refcount, 1); 5814} 5815 5816static struct root_domain *alloc_rootdomain(void) 5817{ 5818 struct root_domain *rd; 5819 5820 rd = kmalloc(sizeof(*rd), GFP_KERNEL); 5821 if (!rd) 5822 return NULL; 5823 5824 if (init_rootdomain(rd) != 0) { 5825 kfree(rd); 5826 return NULL; 5827 } 5828 5829 return rd; 5830} 5831 5832static void free_sched_groups(struct sched_group *sg, int free_sgp) 5833{ 5834 struct sched_group *tmp, *first; 5835 5836 if (!sg) 5837 return; 5838 5839 first = sg; 5840 do { 5841 tmp = sg->next; 5842 5843 if (free_sgp && atomic_dec_and_test(&sg->sgp->ref)) 5844 kfree(sg->sgp); 5845 5846 kfree(sg); 5847 sg = tmp; 5848 } while (sg != first); 5849} 5850 5851static void free_sched_domain(struct rcu_head *rcu) 5852{ 5853 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); 5854 5855 /* 5856 * If its an overlapping domain it has private groups, iterate and 5857 * nuke them all. 5858 */ 5859 if (sd->flags & SD_OVERLAP) { 5860 free_sched_groups(sd->groups, 1); 5861 } else if (atomic_dec_and_test(&sd->groups->ref)) { 5862 kfree(sd->groups->sgp); 5863 kfree(sd->groups); 5864 } 5865 kfree(sd); 5866} 5867 5868static void destroy_sched_domain(struct sched_domain *sd, int cpu) 5869{ 5870 call_rcu(&sd->rcu, free_sched_domain); 5871} 5872 5873static void destroy_sched_domains(struct sched_domain *sd, int cpu) 5874{ 5875 for (; sd; sd = sd->parent) 5876 destroy_sched_domain(sd, cpu); 5877} 5878 5879/* 5880 * Keep a special pointer to the highest sched_domain that has 5881 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this 5882 * allows us to avoid some pointer chasing select_idle_sibling(). 5883 * 5884 * Also keep a unique ID per domain (we use the first cpu number in 5885 * the cpumask of the domain), this allows us to quickly tell if 5886 * two cpus are in the same cache domain, see cpus_share_cache(). 5887 */ 5888DEFINE_PER_CPU(struct sched_domain *, sd_llc); 5889DEFINE_PER_CPU(int, sd_llc_id); 5890 5891static void update_top_cache_domain(int cpu) 5892{ 5893 struct sched_domain *sd; 5894 int id = cpu; 5895 5896 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); 5897 if (sd) 5898 id = cpumask_first(sched_domain_span(sd)); 5899 5900 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); 5901 per_cpu(sd_llc_id, cpu) = id; 5902} 5903 5904/* 5905 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must 5906 * hold the hotplug lock. 
5907 */ 5908static void 5909cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) 5910{ 5911 struct rq *rq = cpu_rq(cpu); 5912 struct sched_domain *tmp; 5913 5914 /* Remove the sched domains which do not contribute to scheduling. */ 5915 for (tmp = sd; tmp; ) { 5916 struct sched_domain *parent = tmp->parent; 5917 if (!parent) 5918 break; 5919 5920 if (sd_parent_degenerate(tmp, parent)) { 5921 tmp->parent = parent->parent; 5922 if (parent->parent) 5923 parent->parent->child = tmp; 5924 destroy_sched_domain(parent, cpu); 5925 } else 5926 tmp = tmp->parent; 5927 } 5928 5929 if (sd && sd_degenerate(sd)) { 5930 tmp = sd; 5931 sd = sd->parent; 5932 destroy_sched_domain(tmp, cpu); 5933 if (sd) 5934 sd->child = NULL; 5935 } 5936 5937 sched_domain_debug(sd, cpu); 5938 5939 rq_attach_root(rq, rd); 5940 tmp = rq->sd; 5941 rcu_assign_pointer(rq->sd, sd); 5942 destroy_sched_domains(tmp, cpu); 5943 5944 update_top_cache_domain(cpu); 5945} 5946 5947/* cpus with isolated domains */ 5948static cpumask_var_t cpu_isolated_map; 5949 5950/* Setup the mask of cpus configured for isolated domains */ 5951static int __init isolated_cpu_setup(char *str) 5952{ 5953 alloc_bootmem_cpumask_var(&cpu_isolated_map); 5954 cpulist_parse(str, cpu_isolated_map); 5955 return 1; 5956} 5957 5958__setup("isolcpus=", isolated_cpu_setup); 5959 5960static const struct cpumask *cpu_cpu_mask(int cpu) 5961{ 5962 return cpumask_of_node(cpu_to_node(cpu)); 5963} 5964 5965struct sd_data { 5966 struct sched_domain **__percpu sd; 5967 struct sched_group **__percpu sg; 5968 struct sched_group_power **__percpu sgp; 5969}; 5970 5971struct s_data { 5972 struct sched_domain ** __percpu sd; 5973 struct root_domain *rd; 5974}; 5975 5976enum s_alloc { 5977 sa_rootdomain, 5978 sa_sd, 5979 sa_sd_storage, 5980 sa_none, 5981}; 5982 5983struct sched_domain_topology_level; 5984 5985typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu); 5986typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); 5987 5988#define SDTL_OVERLAP 0x01 5989 5990struct sched_domain_topology_level { 5991 sched_domain_init_f init; 5992 sched_domain_mask_f mask; 5993 int flags; 5994 int numa_level; 5995 struct sd_data data; 5996}; 5997 5998static int 5999build_overlap_sched_groups(struct sched_domain *sd, int cpu) 6000{ 6001 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg; 6002 const struct cpumask *span = sched_domain_span(sd); 6003 struct cpumask *covered = sched_domains_tmpmask; 6004 struct sd_data *sdd = sd->private; 6005 struct sched_domain *child; 6006 int i; 6007 6008 cpumask_clear(covered); 6009 6010 for_each_cpu(i, span) { 6011 struct cpumask *sg_span; 6012 6013 if (cpumask_test_cpu(i, covered)) 6014 continue; 6015 6016 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 6017 GFP_KERNEL, cpu_to_node(cpu)); 6018 6019 if (!sg) 6020 goto fail; 6021 6022 sg_span = sched_group_cpus(sg); 6023 6024 child = *per_cpu_ptr(sdd->sd, i); 6025 if (child->child) { 6026 child = child->child; 6027 cpumask_copy(sg_span, sched_domain_span(child)); 6028 } else 6029 cpumask_set_cpu(i, sg_span); 6030 6031 cpumask_or(covered, covered, sg_span); 6032 6033 sg->sgp = *per_cpu_ptr(sdd->sgp, i); 6034 atomic_inc(&sg->sgp->ref); 6035 6036 if ((!groups && cpumask_test_cpu(cpu, sg_span)) || 6037 cpumask_first(sg_span) == cpu) { 6038 WARN_ON_ONCE(!cpumask_test_cpu(cpu, sg_span)); 6039 groups = sg; 6040 } 6041 6042 if (!first) 6043 first = sg; 6044 if (last) 6045 last->next = sg; 6046 last = sg; 6047 
last->next = first; 6048 } 6049 sd->groups = groups; 6050 6051 return 0; 6052 6053fail: 6054 free_sched_groups(first, 0); 6055 6056 return -ENOMEM; 6057} 6058 6059static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) 6060{ 6061 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 6062 struct sched_domain *child = sd->child; 6063 6064 if (child) 6065 cpu = cpumask_first(sched_domain_span(child)); 6066 6067 if (sg) { 6068 *sg = *per_cpu_ptr(sdd->sg, cpu); 6069 (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu); 6070 atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */ 6071 } 6072 6073 return cpu; 6074} 6075 6076/* 6077 * build_sched_groups will build a circular linked list of the groups 6078 * covered by the given span, and will set each group's ->cpumask correctly, 6079 * and ->cpu_power to 0. 6080 * 6081 * Assumes the sched_domain tree is fully constructed 6082 */ 6083static int 6084build_sched_groups(struct sched_domain *sd, int cpu) 6085{ 6086 struct sched_group *first = NULL, *last = NULL; 6087 struct sd_data *sdd = sd->private; 6088 const struct cpumask *span = sched_domain_span(sd); 6089 struct cpumask *covered; 6090 int i; 6091 6092 get_group(cpu, sdd, &sd->groups); 6093 atomic_inc(&sd->groups->ref); 6094 6095 if (cpu != cpumask_first(sched_domain_span(sd))) 6096 return 0; 6097 6098 lockdep_assert_held(&sched_domains_mutex); 6099 covered = sched_domains_tmpmask; 6100 6101 cpumask_clear(covered); 6102 6103 for_each_cpu(i, span) { 6104 struct sched_group *sg; 6105 int group = get_group(i, sdd, &sg); 6106 int j; 6107 6108 if (cpumask_test_cpu(i, covered)) 6109 continue; 6110 6111 cpumask_clear(sched_group_cpus(sg)); 6112 sg->sgp->power = 0; 6113 6114 for_each_cpu(j, span) { 6115 if (get_group(j, sdd, NULL) != group) 6116 continue; 6117 6118 cpumask_set_cpu(j, covered); 6119 cpumask_set_cpu(j, sched_group_cpus(sg)); 6120 } 6121 6122 if (!first) 6123 first = sg; 6124 if (last) 6125 last->next = sg; 6126 last = sg; 6127 } 6128 last->next = first; 6129 6130 return 0; 6131} 6132 6133/* 6134 * Initialize sched groups cpu_power. 6135 * 6136 * cpu_power indicates the capacity of sched group, which is used while 6137 * distributing the load between different sched groups in a sched domain. 6138 * Typically cpu_power for all the groups in a sched domain will be same unless 6139 * there are asymmetries in the topology. If there are asymmetries, group 6140 * having more cpu_power will pickup more load compared to the group having 6141 * less cpu_power. 
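 *
 * Worked example (illustrative numbers): with two groups of cpu_power
 * 2048 and 1024 (SCHED_POWER_SCALE being 1024 per nominal cpu), the
 * load balancer aims to place roughly two thirds of the domain's load
 * on the first group and one third on the second.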
6142 */ 6143static void init_sched_groups_power(int cpu, struct sched_domain *sd) 6144{ 6145 struct sched_group *sg = sd->groups; 6146 6147 WARN_ON(!sd || !sg); 6148 6149 do { 6150 sg->group_weight = cpumask_weight(sched_group_cpus(sg)); 6151 sg = sg->next; 6152 } while (sg != sd->groups); 6153 6154 if (cpu != group_first_cpu(sg)) 6155 return; 6156 6157 update_group_power(sd, cpu); 6158 atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight); 6159} 6160 6161int __weak arch_sd_sibling_asym_packing(void) 6162{ 6163 return 0*SD_ASYM_PACKING; 6164} 6165 6166/* 6167 * Initializers for schedule domains 6168 * Non-inlined to reduce accumulated stack pressure in build_sched_domains() 6169 */ 6170 6171#ifdef CONFIG_SCHED_DEBUG 6172# define SD_INIT_NAME(sd, type) sd->name = #type 6173#else 6174# define SD_INIT_NAME(sd, type) do { } while (0) 6175#endif 6176 6177#define SD_INIT_FUNC(type) \ 6178static noinline struct sched_domain * \ 6179sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \ 6180{ \ 6181 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \ 6182 *sd = SD_##type##_INIT; \ 6183 SD_INIT_NAME(sd, type); \ 6184 sd->private = &tl->data; \ 6185 return sd; \ 6186} 6187 6188SD_INIT_FUNC(CPU) 6189#ifdef CONFIG_SCHED_SMT 6190 SD_INIT_FUNC(SIBLING) 6191#endif 6192#ifdef CONFIG_SCHED_MC 6193 SD_INIT_FUNC(MC) 6194#endif 6195#ifdef CONFIG_SCHED_BOOK 6196 SD_INIT_FUNC(BOOK) 6197#endif 6198 6199static int default_relax_domain_level = -1; 6200int sched_domain_level_max; 6201 6202static int __init setup_relax_domain_level(char *str) 6203{ 6204 unsigned long val; 6205 6206 val = simple_strtoul(str, NULL, 0); 6207 if (val < sched_domain_level_max) 6208 default_relax_domain_level = val; 6209 6210 return 1; 6211} 6212__setup("relax_domain_level=", setup_relax_domain_level); 6213 6214static void set_domain_attribute(struct sched_domain *sd, 6215 struct sched_domain_attr *attr) 6216{ 6217 int request; 6218 6219 if (!attr || attr->relax_domain_level < 0) { 6220 if (default_relax_domain_level < 0) 6221 return; 6222 else 6223 request = default_relax_domain_level; 6224 } else 6225 request = attr->relax_domain_level; 6226 if (request < sd->level) { 6227 /* turn off idle balance on this domain */ 6228 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 6229 } else { 6230 /* turn on idle balance on this domain */ 6231 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 6232 } 6233} 6234 6235static void __sdt_free(const struct cpumask *cpu_map); 6236static int __sdt_alloc(const struct cpumask *cpu_map); 6237 6238static void __free_domain_allocs(struct s_data *d, enum s_alloc what, 6239 const struct cpumask *cpu_map) 6240{ 6241 switch (what) { 6242 case sa_rootdomain: 6243 if (!atomic_read(&d->rd->refcount)) 6244 free_rootdomain(&d->rd->rcu); /* fall through */ 6245 case sa_sd: 6246 free_percpu(d->sd); /* fall through */ 6247 case sa_sd_storage: 6248 __sdt_free(cpu_map); /* fall through */ 6249 case sa_none: 6250 break; 6251 } 6252} 6253 6254static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, 6255 const struct cpumask *cpu_map) 6256{ 6257 memset(d, 0, sizeof(*d)); 6258 6259 if (__sdt_alloc(cpu_map)) 6260 return sa_sd_storage; 6261 d->sd = alloc_percpu(struct sched_domain *); 6262 if (!d->sd) 6263 return sa_sd_storage; 6264 d->rd = alloc_rootdomain(); 6265 if (!d->rd) 6266 return sa_sd; 6267 return sa_rootdomain; 6268} 6269 6270/* 6271 * NULL the sd_data elements we've used to build the sched_domain and 6272 * sched_group structure so that the subsequent __free_domain_allocs() 6273 * will 
not free the data we're using. 6274 */ 6275static void claim_allocations(int cpu, struct sched_domain *sd) 6276{ 6277 struct sd_data *sdd = sd->private; 6278 6279 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); 6280 *per_cpu_ptr(sdd->sd, cpu) = NULL; 6281 6282 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) 6283 *per_cpu_ptr(sdd->sg, cpu) = NULL; 6284 6285 if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref)) 6286 *per_cpu_ptr(sdd->sgp, cpu) = NULL; 6287} 6288 6289#ifdef CONFIG_SCHED_SMT 6290static const struct cpumask *cpu_smt_mask(int cpu) 6291{ 6292 return topology_thread_cpumask(cpu); 6293} 6294#endif 6295 6296/* 6297 * Topology list, bottom-up. 6298 */ 6299static struct sched_domain_topology_level default_topology[] = { 6300#ifdef CONFIG_SCHED_SMT 6301 { sd_init_SIBLING, cpu_smt_mask, }, 6302#endif 6303#ifdef CONFIG_SCHED_MC 6304 { sd_init_MC, cpu_coregroup_mask, }, 6305#endif 6306#ifdef CONFIG_SCHED_BOOK 6307 { sd_init_BOOK, cpu_book_mask, }, 6308#endif 6309 { sd_init_CPU, cpu_cpu_mask, }, 6310 { NULL, }, 6311}; 6312 6313static struct sched_domain_topology_level *sched_domain_topology = default_topology; 6314 6315#ifdef CONFIG_NUMA 6316 6317static int sched_domains_numa_levels; 6318static int sched_domains_numa_scale; 6319static int *sched_domains_numa_distance; 6320static struct cpumask ***sched_domains_numa_masks; 6321static int sched_domains_curr_level; 6322 6323static inline int sd_local_flags(int level) 6324{ 6325 if (sched_domains_numa_distance[level] > REMOTE_DISTANCE) 6326 return 0; 6327 6328 return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE; 6329} 6330 6331static struct sched_domain * 6332sd_numa_init(struct sched_domain_topology_level *tl, int cpu) 6333{ 6334 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); 6335 int level = tl->numa_level; 6336 int sd_weight = cpumask_weight( 6337 sched_domains_numa_masks[level][cpu_to_node(cpu)]); 6338 6339 *sd = (struct sched_domain){ 6340 .min_interval = sd_weight, 6341 .max_interval = 2*sd_weight, 6342 .busy_factor = 32, 6343 .imbalance_pct = 125, 6344 .cache_nice_tries = 2, 6345 .busy_idx = 3, 6346 .idle_idx = 2, 6347 .newidle_idx = 0, 6348 .wake_idx = 0, 6349 .forkexec_idx = 0, 6350 6351 .flags = 1*SD_LOAD_BALANCE 6352 | 1*SD_BALANCE_NEWIDLE 6353 | 0*SD_BALANCE_EXEC 6354 | 0*SD_BALANCE_FORK 6355 | 0*SD_BALANCE_WAKE 6356 | 0*SD_WAKE_AFFINE 6357 | 0*SD_PREFER_LOCAL 6358 | 0*SD_SHARE_CPUPOWER 6359 | 0*SD_SHARE_PKG_RESOURCES 6360 | 1*SD_SERIALIZE 6361 | 0*SD_PREFER_SIBLING 6362 | sd_local_flags(level) 6363 , 6364 .last_balance = jiffies, 6365 .balance_interval = sd_weight, 6366 }; 6367 SD_INIT_NAME(sd, NUMA); 6368 sd->private = &tl->data; 6369 6370 /* 6371 * Ugly hack to pass state to sd_numa_mask()... 6372 */ 6373 sched_domains_curr_level = tl->numa_level; 6374 6375 return sd; 6376} 6377 6378static const struct cpumask *sd_numa_mask(int cpu) 6379{ 6380 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; 6381} 6382 6383static void sched_init_numa(void) 6384{ 6385 int next_distance, curr_distance = node_distance(0, 0); 6386 struct sched_domain_topology_level *tl; 6387 int level = 0; 6388 int i, j, k; 6389 6390 sched_domains_numa_scale = curr_distance; 6391 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); 6392 if (!sched_domains_numa_distance) 6393 return; 6394 6395 /* 6396 * O(nr_nodes^2) deduplicating selection sort -- in order to find the 6397 * unique distances in the node_distance() table. 
6398 * 6399 * Assumes node_distance(0,j) includes all distances in 6400 * node_distance(i,j) in order to avoid cubic time. 6401 * 6402 * XXX: could be optimized to O(n log n) by using sort() 6403 */ 6404 next_distance = curr_distance; 6405 for (i = 0; i < nr_node_ids; i++) { 6406 for (j = 0; j < nr_node_ids; j++) { 6407 int distance = node_distance(0, j); 6408 if (distance > curr_distance && 6409 (distance < next_distance || 6410 next_distance == curr_distance)) 6411 next_distance = distance; 6412 } 6413 if (next_distance != curr_distance) { 6414 sched_domains_numa_distance[level++] = next_distance; 6415 sched_domains_numa_levels = level; 6416 curr_distance = next_distance; 6417 } else break; 6418 } 6419 /* 6420 * 'level' contains the number of unique distances, excluding the 6421 * identity distance node_distance(i,i). 6422 * 6423 * The sched_domains_numa_distance[] array includes the actual distance 6424 * numbers. 6425 */ 6426 6427 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL); 6428 if (!sched_domains_numa_masks) 6429 return; 6430 6431 /* 6432 * Now for each level, construct a mask per node which contains all 6433 * cpus of nodes that are that many hops away from us. 6434 */ 6435 for (i = 0; i < level; i++) { 6436 sched_domains_numa_masks[i] = 6437 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); 6438 if (!sched_domains_numa_masks[i]) 6439 return; 6440 6441 for (j = 0; j < nr_node_ids; j++) { 6442 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); 6443 if (!mask) 6444 return; 6445 6446 sched_domains_numa_masks[i][j] = mask; 6447 6448 for (k = 0; k < nr_node_ids; k++) { 6449 if (node_distance(j, k) > sched_domains_numa_distance[i]) 6450 continue; 6451 6452 cpumask_or(mask, mask, cpumask_of_node(k)); 6453 } 6454 } 6455 } 6456 6457 tl = kzalloc((ARRAY_SIZE(default_topology) + level) * 6458 sizeof(struct sched_domain_topology_level), GFP_KERNEL); 6459 if (!tl) 6460 return; 6461 6462 /* 6463 * Copy the default topology bits.. 6464 */ 6465 for (i = 0; default_topology[i].init; i++) 6466 tl[i] = default_topology[i]; 6467 6468 /* 6469 * .. and append 'j' levels of NUMA goodness. 
6470 */ 6471 for (j = 0; j < level; i++, j++) { 6472 tl[i] = (struct sched_domain_topology_level){ 6473 .init = sd_numa_init, 6474 .mask = sd_numa_mask, 6475 .flags = SDTL_OVERLAP, 6476 .numa_level = j, 6477 }; 6478 } 6479 6480 sched_domain_topology = tl; 6481} 6482#else 6483static inline void sched_init_numa(void) 6484{ 6485} 6486#endif /* CONFIG_NUMA */ 6487 6488static int __sdt_alloc(const struct cpumask *cpu_map) 6489{ 6490 struct sched_domain_topology_level *tl; 6491 int j; 6492 6493 for (tl = sched_domain_topology; tl->init; tl++) { 6494 struct sd_data *sdd = &tl->data; 6495 6496 sdd->sd = alloc_percpu(struct sched_domain *); 6497 if (!sdd->sd) 6498 return -ENOMEM; 6499 6500 sdd->sg = alloc_percpu(struct sched_group *); 6501 if (!sdd->sg) 6502 return -ENOMEM; 6503 6504 sdd->sgp = alloc_percpu(struct sched_group_power *); 6505 if (!sdd->sgp) 6506 return -ENOMEM; 6507 6508 for_each_cpu(j, cpu_map) { 6509 struct sched_domain *sd; 6510 struct sched_group *sg; 6511 struct sched_group_power *sgp; 6512 6513 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), 6514 GFP_KERNEL, cpu_to_node(j)); 6515 if (!sd) 6516 return -ENOMEM; 6517 6518 *per_cpu_ptr(sdd->sd, j) = sd; 6519 6520 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 6521 GFP_KERNEL, cpu_to_node(j)); 6522 if (!sg) 6523 return -ENOMEM; 6524 6525 sg->next = sg; 6526 6527 *per_cpu_ptr(sdd->sg, j) = sg; 6528 6529 sgp = kzalloc_node(sizeof(struct sched_group_power), 6530 GFP_KERNEL, cpu_to_node(j)); 6531 if (!sgp) 6532 return -ENOMEM; 6533 6534 *per_cpu_ptr(sdd->sgp, j) = sgp; 6535 } 6536 } 6537 6538 return 0; 6539} 6540 6541static void __sdt_free(const struct cpumask *cpu_map) 6542{ 6543 struct sched_domain_topology_level *tl; 6544 int j; 6545 6546 for (tl = sched_domain_topology; tl->init; tl++) { 6547 struct sd_data *sdd = &tl->data; 6548 6549 for_each_cpu(j, cpu_map) { 6550 struct sched_domain *sd; 6551 6552 if (sdd->sd) { 6553 sd = *per_cpu_ptr(sdd->sd, j); 6554 if (sd && (sd->flags & SD_OVERLAP)) 6555 free_sched_groups(sd->groups, 0); 6556 kfree(*per_cpu_ptr(sdd->sd, j)); 6557 } 6558 6559 if (sdd->sg) 6560 kfree(*per_cpu_ptr(sdd->sg, j)); 6561 if (sdd->sgp) 6562 kfree(*per_cpu_ptr(sdd->sgp, j)); 6563 } 6564 free_percpu(sdd->sd); 6565 sdd->sd = NULL; 6566 free_percpu(sdd->sg); 6567 sdd->sg = NULL; 6568 free_percpu(sdd->sgp); 6569 sdd->sgp = NULL; 6570 } 6571} 6572 6573struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, 6574 struct s_data *d, const struct cpumask *cpu_map, 6575 struct sched_domain_attr *attr, struct sched_domain *child, 6576 int cpu) 6577{ 6578 struct sched_domain *sd = tl->init(tl, cpu); 6579 if (!sd) 6580 return child; 6581 6582 set_domain_attribute(sd, attr); 6583 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); 6584 if (child) { 6585 sd->level = child->level + 1; 6586 sched_domain_level_max = max(sched_domain_level_max, sd->level); 6587 child->parent = sd; 6588 } 6589 sd->child = child; 6590 6591 return sd; 6592} 6593 6594/* 6595 * Build sched domains for a given set of cpus and attach the sched domains 6596 * to the individual cpus 6597 */ 6598static int build_sched_domains(const struct cpumask *cpu_map, 6599 struct sched_domain_attr *attr) 6600{ 6601 enum s_alloc alloc_state = sa_none; 6602 struct sched_domain *sd; 6603 struct s_data d; 6604 int i, ret = -ENOMEM; 6605 6606 alloc_state = __visit_domain_allocation_hell(&d, cpu_map); 6607 if (alloc_state != sa_rootdomain) 6608 goto error; 6609 6610 /* Set up domains for cpus specified by the cpu_map. 
*/ 6611 for_each_cpu(i, cpu_map) { 6612 struct sched_domain_topology_level *tl; 6613 6614 sd = NULL; 6615 for (tl = sched_domain_topology; tl->init; tl++) { 6616 sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i); 6617 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP)) 6618 sd->flags |= SD_OVERLAP; 6619 if (cpumask_equal(cpu_map, sched_domain_span(sd))) 6620 break; 6621 } 6622 6623 while (sd->child) 6624 sd = sd->child; 6625 6626 *per_cpu_ptr(d.sd, i) = sd; 6627 } 6628 6629 /* Build the groups for the domains */ 6630 for_each_cpu(i, cpu_map) { 6631 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 6632 sd->span_weight = cpumask_weight(sched_domain_span(sd)); 6633 if (sd->flags & SD_OVERLAP) { 6634 if (build_overlap_sched_groups(sd, i)) 6635 goto error; 6636 } else { 6637 if (build_sched_groups(sd, i)) 6638 goto error; 6639 } 6640 } 6641 } 6642 6643 /* Calculate CPU power for physical packages and nodes */ 6644 for (i = nr_cpumask_bits-1; i >= 0; i--) { 6645 if (!cpumask_test_cpu(i, cpu_map)) 6646 continue; 6647 6648 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 6649 claim_allocations(i, sd); 6650 init_sched_groups_power(i, sd); 6651 } 6652 } 6653 6654 /* Attach the domains */ 6655 rcu_read_lock(); 6656 for_each_cpu(i, cpu_map) { 6657 sd = *per_cpu_ptr(d.sd, i); 6658 cpu_attach_domain(sd, d.rd, i); 6659 } 6660 rcu_read_unlock(); 6661 6662 ret = 0; 6663error: 6664 __free_domain_allocs(&d, alloc_state, cpu_map); 6665 return ret; 6666} 6667 6668static cpumask_var_t *doms_cur; /* current sched domains */ 6669static int ndoms_cur; /* number of sched domains in 'doms_cur' */ 6670static struct sched_domain_attr *dattr_cur; 6671 /* attribues of custom domains in 'doms_cur' */ 6672 6673/* 6674 * Special case: If a kmalloc of a doms_cur partition (array of 6675 * cpumask) fails, then fallback to a single sched domain, 6676 * as determined by the single cpumask fallback_doms. 6677 */ 6678static cpumask_var_t fallback_doms; 6679 6680/* 6681 * arch_update_cpu_topology lets virtualized architectures update the 6682 * cpu core maps. It is supposed to return 1 if the topology changed 6683 * or 0 if it stayed the same. 6684 */ 6685int __attribute__((weak)) arch_update_cpu_topology(void) 6686{ 6687 return 0; 6688} 6689 6690cpumask_var_t *alloc_sched_domains(unsigned int ndoms) 6691{ 6692 int i; 6693 cpumask_var_t *doms; 6694 6695 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); 6696 if (!doms) 6697 return NULL; 6698 for (i = 0; i < ndoms; i++) { 6699 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { 6700 free_sched_domains(doms, i); 6701 return NULL; 6702 } 6703 } 6704 return doms; 6705} 6706 6707void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) 6708{ 6709 unsigned int i; 6710 for (i = 0; i < ndoms; i++) 6711 free_cpumask_var(doms[i]); 6712 kfree(doms); 6713} 6714 6715/* 6716 * Set up scheduler domains and groups. Callers must hold the hotplug lock. 6717 * For now this just excludes isolated cpus, but could be used to 6718 * exclude other special cases in the future. 
6719 */ 6720static int init_sched_domains(const struct cpumask *cpu_map) 6721{ 6722 int err; 6723 6724 arch_update_cpu_topology(); 6725 ndoms_cur = 1; 6726 doms_cur = alloc_sched_domains(ndoms_cur); 6727 if (!doms_cur) 6728 doms_cur = &fallback_doms; 6729 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); 6730 dattr_cur = NULL; 6731 err = build_sched_domains(doms_cur[0], NULL); 6732 register_sched_domain_sysctl(); 6733 6734 return err; 6735} 6736 6737/* 6738 * Detach sched domains from a group of cpus specified in cpu_map 6739 * These cpus will now be attached to the NULL domain 6740 */ 6741static void detach_destroy_domains(const struct cpumask *cpu_map) 6742{ 6743 int i; 6744 6745 rcu_read_lock(); 6746 for_each_cpu(i, cpu_map) 6747 cpu_attach_domain(NULL, &def_root_domain, i); 6748 rcu_read_unlock(); 6749} 6750 6751/* handle null as "default" */ 6752static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, 6753 struct sched_domain_attr *new, int idx_new) 6754{ 6755 struct sched_domain_attr tmp; 6756 6757 /* fast path */ 6758 if (!new && !cur) 6759 return 1; 6760 6761 tmp = SD_ATTR_INIT; 6762 return !memcmp(cur ? (cur + idx_cur) : &tmp, 6763 new ? (new + idx_new) : &tmp, 6764 sizeof(struct sched_domain_attr)); 6765} 6766 6767/* 6768 * Partition sched domains as specified by the 'ndoms_new' 6769 * cpumasks in the array doms_new[] of cpumasks. This compares 6770 * doms_new[] to the current sched domain partitioning, doms_cur[]. 6771 * It destroys each deleted domain and builds each new domain. 6772 * 6773 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. 6774 * The masks don't intersect (don't overlap.) We should setup one 6775 * sched domain for each mask. CPUs not in any of the cpumasks will 6776 * not be load balanced. If the same cpumask appears both in the 6777 * current 'doms_cur' domains and in the new 'doms_new', we can leave 6778 * it as it is. 6779 * 6780 * The passed in 'doms_new' should be allocated using 6781 * alloc_sched_domains. This routine takes ownership of it and will 6782 * free_sched_domains it when done with it. If the caller failed the 6783 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, 6784 * and partition_sched_domains() will fallback to the single partition 6785 * 'fallback_doms', it also forces the domains to be rebuilt. 6786 * 6787 * If doms_new == NULL it will be replaced with cpu_online_mask. 6788 * ndoms_new == 0 is a special case for destroying existing domains, 6789 * and it will not create the default domain. 6790 * 6791 * Call with hotplug lock held 6792 */ 6793void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 6794 struct sched_domain_attr *dattr_new) 6795{ 6796 int i, j, n; 6797 int new_topology; 6798 6799 mutex_lock(&sched_domains_mutex); 6800 6801 /* always unregister in case we don't destroy any domains */ 6802 unregister_sched_domain_sysctl(); 6803 6804 /* Let architecture update cpu core mappings. */ 6805 new_topology = arch_update_cpu_topology(); 6806 6807 n = doms_new ? 
ndoms_new : 0; 6808 6809 /* Destroy deleted domains */ 6810 for (i = 0; i < ndoms_cur; i++) { 6811 for (j = 0; j < n && !new_topology; j++) { 6812 if (cpumask_equal(doms_cur[i], doms_new[j]) 6813 && dattrs_equal(dattr_cur, i, dattr_new, j)) 6814 goto match1; 6815 } 6816 /* no match - a current sched domain not in new doms_new[] */ 6817 detach_destroy_domains(doms_cur[i]); 6818match1: 6819 ; 6820 } 6821 6822 if (doms_new == NULL) { 6823 ndoms_cur = 0; 6824 doms_new = &fallback_doms; 6825 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); 6826 WARN_ON_ONCE(dattr_new); 6827 } 6828 6829 /* Build new domains */ 6830 for (i = 0; i < ndoms_new; i++) { 6831 for (j = 0; j < ndoms_cur && !new_topology; j++) { 6832 if (cpumask_equal(doms_new[i], doms_cur[j]) 6833 && dattrs_equal(dattr_new, i, dattr_cur, j)) 6834 goto match2; 6835 } 6836 /* no match - add a new doms_new */ 6837 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); 6838match2: 6839 ; 6840 } 6841 6842 /* Remember the new sched domains */ 6843 if (doms_cur != &fallback_doms) 6844 free_sched_domains(doms_cur, ndoms_cur); 6845 kfree(dattr_cur); /* kfree(NULL) is safe */ 6846 doms_cur = doms_new; 6847 dattr_cur = dattr_new; 6848 ndoms_cur = ndoms_new; 6849 6850 register_sched_domain_sysctl(); 6851 6852 mutex_unlock(&sched_domains_mutex); 6853} 6854 6855/* 6856 * Update cpusets according to cpu_active mask. If cpusets are 6857 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 6858 * around partition_sched_domains(). 6859 */ 6860static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, 6861 void *hcpu) 6862{ 6863 switch (action & ~CPU_TASKS_FROZEN) { 6864 case CPU_ONLINE: 6865 case CPU_DOWN_FAILED: 6866 cpuset_update_active_cpus(); 6867 return NOTIFY_OK; 6868 default: 6869 return NOTIFY_DONE; 6870 } 6871} 6872 6873static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, 6874 void *hcpu) 6875{ 6876 switch (action & ~CPU_TASKS_FROZEN) { 6877 case CPU_DOWN_PREPARE: 6878 cpuset_update_active_cpus(); 6879 return NOTIFY_OK; 6880 default: 6881 return NOTIFY_DONE; 6882 } 6883} 6884 6885void __init sched_init_smp(void) 6886{ 6887 cpumask_var_t non_isolated_cpus; 6888 6889 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); 6890 alloc_cpumask_var(&fallback_doms, GFP_KERNEL); 6891 6892 sched_init_numa(); 6893 6894 get_online_cpus(); 6895 mutex_lock(&sched_domains_mutex); 6896 init_sched_domains(cpu_active_mask); 6897 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); 6898 if (cpumask_empty(non_isolated_cpus)) 6899 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); 6900 mutex_unlock(&sched_domains_mutex); 6901 put_online_cpus(); 6902 6903 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); 6904 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); 6905 6906 /* RT runtime code needs to handle some hotplug events */ 6907 hotcpu_notifier(update_runtime, 0); 6908 6909 init_hrtick(); 6910 6911 /* Move init over to a non-isolated CPU */ 6912 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) 6913 BUG(); 6914 sched_init_granularity(); 6915 free_cpumask_var(non_isolated_cpus); 6916 6917 init_sched_rt_class(); 6918} 6919#else 6920void __init sched_init_smp(void) 6921{ 6922 sched_init_granularity(); 6923} 6924#endif /* CONFIG_SMP */ 6925 6926const_debug unsigned int sysctl_timer_migration = 1; 6927 6928int in_sched_functions(unsigned long addr) 6929{ 6930 return in_lock_functions(addr) || 6931 (addr >= (unsigned 
long)__sched_text_start 6932 && addr < (unsigned long)__sched_text_end); 6933} 6934 6935#ifdef CONFIG_CGROUP_SCHED 6936struct task_group root_task_group; 6937#endif 6938 6939DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask); 6940 6941void __init sched_init(void) 6942{ 6943 int i, j; 6944 unsigned long alloc_size = 0, ptr; 6945 6946#ifdef CONFIG_FAIR_GROUP_SCHED 6947 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 6948#endif 6949#ifdef CONFIG_RT_GROUP_SCHED 6950 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 6951#endif 6952#ifdef CONFIG_CPUMASK_OFFSTACK 6953 alloc_size += num_possible_cpus() * cpumask_size(); 6954#endif 6955 if (alloc_size) { 6956 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); 6957 6958#ifdef CONFIG_FAIR_GROUP_SCHED 6959 root_task_group.se = (struct sched_entity **)ptr; 6960 ptr += nr_cpu_ids * sizeof(void **); 6961 6962 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 6963 ptr += nr_cpu_ids * sizeof(void **); 6964 6965#endif /* CONFIG_FAIR_GROUP_SCHED */ 6966#ifdef CONFIG_RT_GROUP_SCHED 6967 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 6968 ptr += nr_cpu_ids * sizeof(void **); 6969 6970 root_task_group.rt_rq = (struct rt_rq **)ptr; 6971 ptr += nr_cpu_ids * sizeof(void **); 6972 6973#endif /* CONFIG_RT_GROUP_SCHED */ 6974#ifdef CONFIG_CPUMASK_OFFSTACK 6975 for_each_possible_cpu(i) { 6976 per_cpu(load_balance_tmpmask, i) = (void *)ptr; 6977 ptr += cpumask_size(); 6978 } 6979#endif /* CONFIG_CPUMASK_OFFSTACK */ 6980 } 6981 6982#ifdef CONFIG_SMP 6983 init_defrootdomain(); 6984#endif 6985 6986 init_rt_bandwidth(&def_rt_bandwidth, 6987 global_rt_period(), global_rt_runtime()); 6988 6989#ifdef CONFIG_RT_GROUP_SCHED 6990 init_rt_bandwidth(&root_task_group.rt_bandwidth, 6991 global_rt_period(), global_rt_runtime()); 6992#endif /* CONFIG_RT_GROUP_SCHED */ 6993 6994#ifdef CONFIG_CGROUP_SCHED 6995 list_add(&root_task_group.list, &task_groups); 6996 INIT_LIST_HEAD(&root_task_group.children); 6997 INIT_LIST_HEAD(&root_task_group.siblings); 6998 autogroup_init(&init_task); 6999 7000#endif /* CONFIG_CGROUP_SCHED */ 7001 7002#ifdef CONFIG_CGROUP_CPUACCT 7003 root_cpuacct.cpustat = &kernel_cpustat; 7004 root_cpuacct.cpuusage = alloc_percpu(u64); 7005 /* Too early, not expected to fail */ 7006 BUG_ON(!root_cpuacct.cpuusage); 7007#endif 7008 for_each_possible_cpu(i) { 7009 struct rq *rq; 7010 7011 rq = cpu_rq(i); 7012 raw_spin_lock_init(&rq->lock); 7013 rq->nr_running = 0; 7014 rq->calc_load_active = 0; 7015 rq->calc_load_update = jiffies + LOAD_FREQ; 7016 init_cfs_rq(&rq->cfs); 7017 init_rt_rq(&rq->rt, rq); 7018#ifdef CONFIG_FAIR_GROUP_SCHED 7019 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 7020 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 7021 /* 7022 * How much cpu bandwidth does root_task_group get? 7023 * 7024 * In case of task-groups formed thr' the cgroup filesystem, it 7025 * gets 100% of the cpu resources in the system. This overall 7026 * system cpu resource is divided among the tasks of 7027 * root_task_group and its child task-groups in a fair manner, 7028 * based on each entity's (task or task-group's) weight 7029 * (se->load.weight). 7030 * 7031 * In other words, if root_task_group has 10 tasks of weight 7032 * 1024) and two child groups A0 and A1 (of weight 1024 each), 7033 * then A0's share of the cpu resource is: 7034 * 7035 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 7036 * 7037 * We achieve this by letting root_task_group's tasks sit 7038 * directly in rq->cfs (i.e root_task_group->se[] = NULL). 
7039 */ 7040 init_cfs_bandwidth(&root_task_group.cfs_bandwidth); 7041 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 7042#endif /* CONFIG_FAIR_GROUP_SCHED */ 7043 7044 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 7045#ifdef CONFIG_RT_GROUP_SCHED 7046 INIT_LIST_HEAD(&rq->leaf_rt_rq_list); 7047 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 7048#endif 7049 7050 for (j = 0; j < CPU_LOAD_IDX_MAX; j++) 7051 rq->cpu_load[j] = 0; 7052 7053 rq->last_load_update_tick = jiffies; 7054 7055#ifdef CONFIG_SMP 7056 rq->sd = NULL; 7057 rq->rd = NULL; 7058 rq->cpu_power = SCHED_POWER_SCALE; 7059 rq->post_schedule = 0; 7060 rq->active_balance = 0; 7061 rq->next_balance = jiffies; 7062 rq->push_cpu = 0; 7063 rq->cpu = i; 7064 rq->online = 0; 7065 rq->idle_stamp = 0; 7066 rq->avg_idle = 2*sysctl_sched_migration_cost; 7067 7068 INIT_LIST_HEAD(&rq->cfs_tasks); 7069 7070 rq_attach_root(rq, &def_root_domain); 7071#ifdef CONFIG_NO_HZ 7072 rq->nohz_flags = 0; 7073#endif 7074#endif 7075 init_rq_hrtick(rq); 7076 atomic_set(&rq->nr_iowait, 0); 7077 } 7078 7079 set_load_weight(&init_task); 7080 7081#ifdef CONFIG_PREEMPT_NOTIFIERS 7082 INIT_HLIST_HEAD(&init_task.preempt_notifiers); 7083#endif 7084 7085#ifdef CONFIG_RT_MUTEXES 7086 plist_head_init(&init_task.pi_waiters); 7087#endif 7088 7089 /* 7090 * The boot idle thread does lazy MMU switching as well: 7091 */ 7092 atomic_inc(&init_mm.mm_count); 7093 enter_lazy_tlb(&init_mm, current); 7094 7095 /* 7096 * Make us the idle thread. Technically, schedule() should not be 7097 * called from this thread, however somewhere below it might be, 7098 * but because we are the idle thread, we just pick up running again 7099 * when this runqueue becomes "idle". 7100 */ 7101 init_idle(current, smp_processor_id()); 7102 7103 calc_load_update = jiffies + LOAD_FREQ; 7104 7105 /* 7106 * During early bootup we pretend to be a normal task: 7107 */ 7108 current->sched_class = &fair_sched_class; 7109 7110#ifdef CONFIG_SMP 7111 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); 7112 /* May be allocated at isolcpus cmdline parse time */ 7113 if (cpu_isolated_map == NULL) 7114 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); 7115 idle_thread_set_boot_cpu(); 7116#endif 7117 init_sched_fair_class(); 7118 7119 scheduler_running = 1; 7120} 7121 7122#ifdef CONFIG_DEBUG_ATOMIC_SLEEP 7123static inline int preempt_count_equals(int preempt_offset) 7124{ 7125 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); 7126 7127 return (nested == preempt_offset); 7128} 7129 7130void __might_sleep(const char *file, int line, int preempt_offset) 7131{ 7132 static unsigned long prev_jiffy; /* ratelimiting */ 7133 7134 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. 
*/ 7135 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || 7136 system_state != SYSTEM_RUNNING || oops_in_progress) 7137 return; 7138 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 7139 return; 7140 prev_jiffy = jiffies; 7141 7142 printk(KERN_ERR 7143 "BUG: sleeping function called from invalid context at %s:%d\n", 7144 file, line); 7145 printk(KERN_ERR 7146 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 7147 in_atomic(), irqs_disabled(), 7148 current->pid, current->comm); 7149 7150 debug_show_held_locks(current); 7151 if (irqs_disabled()) 7152 print_irqtrace_events(current); 7153 dump_stack(); 7154} 7155EXPORT_SYMBOL(__might_sleep); 7156#endif 7157 7158#ifdef CONFIG_MAGIC_SYSRQ 7159static void normalize_task(struct rq *rq, struct task_struct *p) 7160{ 7161 const struct sched_class *prev_class = p->sched_class; 7162 int old_prio = p->prio; 7163 int on_rq; 7164 7165 on_rq = p->on_rq; 7166 if (on_rq) 7167 dequeue_task(rq, p, 0); 7168 __setscheduler(rq, p, SCHED_NORMAL, 0); 7169 if (on_rq) { 7170 enqueue_task(rq, p, 0); 7171 resched_task(rq->curr); 7172 } 7173 7174 check_class_changed(rq, p, prev_class, old_prio); 7175} 7176 7177void normalize_rt_tasks(void) 7178{ 7179 struct task_struct *g, *p; 7180 unsigned long flags; 7181 struct rq *rq; 7182 7183 read_lock_irqsave(&tasklist_lock, flags); 7184 do_each_thread(g, p) { 7185 /* 7186 * Only normalize user tasks: 7187 */ 7188 if (!p->mm) 7189 continue; 7190 7191 p->se.exec_start = 0; 7192#ifdef CONFIG_SCHEDSTATS 7193 p->se.statistics.wait_start = 0; 7194 p->se.statistics.sleep_start = 0; 7195 p->se.statistics.block_start = 0; 7196#endif 7197 7198 if (!rt_task(p)) { 7199 /* 7200 * Renice negative nice level userspace 7201 * tasks back to 0: 7202 */ 7203 if (TASK_NICE(p) < 0 && p->mm) 7204 set_user_nice(p, 0); 7205 continue; 7206 } 7207 7208 raw_spin_lock(&p->pi_lock); 7209 rq = __task_rq_lock(p); 7210 7211 normalize_task(rq, p); 7212 7213 __task_rq_unlock(rq); 7214 raw_spin_unlock(&p->pi_lock); 7215 } while_each_thread(g, p); 7216 7217 read_unlock_irqrestore(&tasklist_lock, flags); 7218} 7219 7220#endif /* CONFIG_MAGIC_SYSRQ */ 7221 7222#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) 7223/* 7224 * These functions are only useful for the IA64 MCA handling, or kdb. 7225 * 7226 * They can only be called when the whole system has been 7227 * stopped - every CPU needs to be quiescent, and no scheduling 7228 * activity can take place. Using them for anything else would 7229 * be a serious bug, and as a result, they aren't even visible 7230 * under any other configuration. 7231 */ 7232 7233/** 7234 * curr_task - return the current task for a given cpu. 7235 * @cpu: the processor in question. 7236 * 7237 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 7238 */ 7239struct task_struct *curr_task(int cpu) 7240{ 7241 return cpu_curr(cpu); 7242} 7243 7244#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ 7245 7246#ifdef CONFIG_IA64 7247/** 7248 * set_curr_task - set the current task for a given cpu. 7249 * @cpu: the processor in question. 7250 * @p: the task pointer to set. 7251 * 7252 * Description: This function must only be used when non-maskable interrupts 7253 * are serviced on a separate stack. It allows the architecture to switch the 7254 * notion of the current task on a cpu in a non-blocking manner. 
This function 7255 * must be called with all CPUs synchronized and interrupts disabled, and the 7256 * caller must save the original value of the current task (see 7257 * curr_task() above) and restore that value before reenabling interrupts and 7258 * re-starting the system. 7259 * 7260 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 7261 */ 7262void set_curr_task(int cpu, struct task_struct *p) 7263{ 7264 cpu_curr(cpu) = p; 7265} 7266 7267#endif 7268 7269#ifdef CONFIG_CGROUP_SCHED 7270/* task_group_lock serializes the addition/removal of task groups */ 7271static DEFINE_SPINLOCK(task_group_lock); 7272 7273static void free_sched_group(struct task_group *tg) 7274{ 7275 free_fair_sched_group(tg); 7276 free_rt_sched_group(tg); 7277 autogroup_free(tg); 7278 kfree(tg); 7279} 7280 7281/* allocate runqueue etc for a new task group */ 7282struct task_group *sched_create_group(struct task_group *parent) 7283{ 7284 struct task_group *tg; 7285 unsigned long flags; 7286 7287 tg = kzalloc(sizeof(*tg), GFP_KERNEL); 7288 if (!tg) 7289 return ERR_PTR(-ENOMEM); 7290 7291 if (!alloc_fair_sched_group(tg, parent)) 7292 goto err; 7293 7294 if (!alloc_rt_sched_group(tg, parent)) 7295 goto err; 7296 7297 spin_lock_irqsave(&task_group_lock, flags); 7298 list_add_rcu(&tg->list, &task_groups); 7299 7300 WARN_ON(!parent); /* root should already exist */ 7301 7302 tg->parent = parent; 7303 INIT_LIST_HEAD(&tg->children); 7304 list_add_rcu(&tg->siblings, &parent->children); 7305 spin_unlock_irqrestore(&task_group_lock, flags); 7306 7307 return tg; 7308 7309err: 7310 free_sched_group(tg); 7311 return ERR_PTR(-ENOMEM); 7312} 7313 7314/* rcu callback to free various structures associated with a task group */ 7315static void free_sched_group_rcu(struct rcu_head *rhp) 7316{ 7317 /* now it should be safe to free those cfs_rqs */ 7318 free_sched_group(container_of(rhp, struct task_group, rcu)); 7319} 7320 7321/* Destroy runqueue etc associated with a task group */ 7322void sched_destroy_group(struct task_group *tg) 7323{ 7324 unsigned long flags; 7325 int i; 7326 7327 /* end participation in shares distribution */ 7328 for_each_possible_cpu(i) 7329 unregister_fair_sched_group(tg, i); 7330 7331 spin_lock_irqsave(&task_group_lock, flags); 7332 list_del_rcu(&tg->list); 7333 list_del_rcu(&tg->siblings); 7334 spin_unlock_irqrestore(&task_group_lock, flags); 7335 7336 /* wait for possible concurrent references to cfs_rqs to complete */ 7337 call_rcu(&tg->rcu, free_sched_group_rcu); 7338} 7339 7340/* change task's runqueue when it moves between groups. 7341 * The caller of this function should have put the task in its new group 7342 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to 7343 * reflect its new group. 
7344 */ 7345void sched_move_task(struct task_struct *tsk) 7346{ 7347 int on_rq, running; 7348 unsigned long flags; 7349 struct rq *rq; 7350 7351 rq = task_rq_lock(tsk, &flags); 7352 7353 running = task_current(rq, tsk); 7354 on_rq = tsk->on_rq; 7355 7356 if (on_rq) 7357 dequeue_task(rq, tsk, 0); 7358 if (unlikely(running)) 7359 tsk->sched_class->put_prev_task(rq, tsk); 7360 7361#ifdef CONFIG_FAIR_GROUP_SCHED 7362 if (tsk->sched_class->task_move_group) 7363 tsk->sched_class->task_move_group(tsk, on_rq); 7364 else 7365#endif 7366 set_task_rq(tsk, task_cpu(tsk)); 7367 7368 if (unlikely(running)) 7369 tsk->sched_class->set_curr_task(rq); 7370 if (on_rq) 7371 enqueue_task(rq, tsk, 0); 7372 7373 task_rq_unlock(rq, tsk, &flags); 7374} 7375#endif /* CONFIG_CGROUP_SCHED */ 7376 7377#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH) 7378static unsigned long to_ratio(u64 period, u64 runtime) 7379{ 7380 if (runtime == RUNTIME_INF) 7381 return 1ULL << 20; 7382 7383 return div64_u64(runtime << 20, period); 7384} 7385#endif 7386 7387#ifdef CONFIG_RT_GROUP_SCHED 7388/* 7389 * Ensure that the real time constraints are schedulable. 7390 */ 7391static DEFINE_MUTEX(rt_constraints_mutex); 7392 7393/* Must be called with tasklist_lock held */ 7394static inline int tg_has_rt_tasks(struct task_group *tg) 7395{ 7396 struct task_struct *g, *p; 7397 7398 do_each_thread(g, p) { 7399 if (rt_task(p) && task_rq(p)->rt.tg == tg) 7400 return 1; 7401 } while_each_thread(g, p); 7402 7403 return 0; 7404} 7405 7406struct rt_schedulable_data { 7407 struct task_group *tg; 7408 u64 rt_period; 7409 u64 rt_runtime; 7410}; 7411 7412static int tg_rt_schedulable(struct task_group *tg, void *data) 7413{ 7414 struct rt_schedulable_data *d = data; 7415 struct task_group *child; 7416 unsigned long total, sum = 0; 7417 u64 period, runtime; 7418 7419 period = ktime_to_ns(tg->rt_bandwidth.rt_period); 7420 runtime = tg->rt_bandwidth.rt_runtime; 7421 7422 if (tg == d->tg) { 7423 period = d->rt_period; 7424 runtime = d->rt_runtime; 7425 } 7426 7427 /* 7428 * Cannot have more runtime than the period. 7429 */ 7430 if (runtime > period && runtime != RUNTIME_INF) 7431 return -EINVAL; 7432 7433 /* 7434 * Ensure we don't starve existing RT tasks. 7435 */ 7436 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) 7437 return -EBUSY; 7438 7439 total = to_ratio(period, runtime); 7440 7441 /* 7442 * Nobody can have more than the global setting allows. 7443 */ 7444 if (total > to_ratio(global_rt_period(), global_rt_runtime())) 7445 return -EINVAL; 7446 7447 /* 7448 * The sum of our children's runtime should not exceed our own. 
7449 */ 7450 list_for_each_entry_rcu(child, &tg->children, siblings) { 7451 period = ktime_to_ns(child->rt_bandwidth.rt_period); 7452 runtime = child->rt_bandwidth.rt_runtime; 7453 7454 if (child == d->tg) { 7455 period = d->rt_period; 7456 runtime = d->rt_runtime; 7457 } 7458 7459 sum += to_ratio(period, runtime); 7460 } 7461 7462 if (sum > total) 7463 return -EINVAL; 7464 7465 return 0; 7466} 7467 7468static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) 7469{ 7470 int ret; 7471 7472 struct rt_schedulable_data data = { 7473 .tg = tg, 7474 .rt_period = period, 7475 .rt_runtime = runtime, 7476 }; 7477 7478 rcu_read_lock(); 7479 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data); 7480 rcu_read_unlock(); 7481 7482 return ret; 7483} 7484 7485static int tg_set_rt_bandwidth(struct task_group *tg, 7486 u64 rt_period, u64 rt_runtime) 7487{ 7488 int i, err = 0; 7489 7490 mutex_lock(&rt_constraints_mutex); 7491 read_lock(&tasklist_lock); 7492 err = __rt_schedulable(tg, rt_period, rt_runtime); 7493 if (err) 7494 goto unlock; 7495 7496 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 7497 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); 7498 tg->rt_bandwidth.rt_runtime = rt_runtime; 7499 7500 for_each_possible_cpu(i) { 7501 struct rt_rq *rt_rq = tg->rt_rq[i]; 7502 7503 raw_spin_lock(&rt_rq->rt_runtime_lock); 7504 rt_rq->rt_runtime = rt_runtime; 7505 raw_spin_unlock(&rt_rq->rt_runtime_lock); 7506 } 7507 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); 7508unlock: 7509 read_unlock(&tasklist_lock); 7510 mutex_unlock(&rt_constraints_mutex); 7511 7512 return err; 7513} 7514 7515int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) 7516{ 7517 u64 rt_runtime, rt_period; 7518 7519 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); 7520 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; 7521 if (rt_runtime_us < 0) 7522 rt_runtime = RUNTIME_INF; 7523 7524 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 7525} 7526 7527long sched_group_rt_runtime(struct task_group *tg) 7528{ 7529 u64 rt_runtime_us; 7530 7531 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) 7532 return -1; 7533 7534 rt_runtime_us = tg->rt_bandwidth.rt_runtime; 7535 do_div(rt_runtime_us, NSEC_PER_USEC); 7536 return rt_runtime_us; 7537} 7538 7539int sched_group_set_rt_period(struct task_group *tg, long rt_period_us) 7540{ 7541 u64 rt_runtime, rt_period; 7542 7543 rt_period = (u64)rt_period_us * NSEC_PER_USEC; 7544 rt_runtime = tg->rt_bandwidth.rt_runtime; 7545 7546 if (rt_period == 0) 7547 return -EINVAL; 7548 7549 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 7550} 7551 7552long sched_group_rt_period(struct task_group *tg) 7553{ 7554 u64 rt_period_us; 7555 7556 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); 7557 do_div(rt_period_us, NSEC_PER_USEC); 7558 return rt_period_us; 7559} 7560 7561static int sched_rt_global_constraints(void) 7562{ 7563 u64 runtime, period; 7564 int ret = 0; 7565 7566 if (sysctl_sched_rt_period <= 0) 7567 return -EINVAL; 7568 7569 runtime = global_rt_runtime(); 7570 period = global_rt_period(); 7571 7572 /* 7573 * Sanity check on the sysctl variables. 
7574 */ 7575 if (runtime > period && runtime != RUNTIME_INF) 7576 return -EINVAL; 7577 7578 mutex_lock(&rt_constraints_mutex); 7579 read_lock(&tasklist_lock); 7580 ret = __rt_schedulable(NULL, 0, 0); 7581 read_unlock(&tasklist_lock); 7582 mutex_unlock(&rt_constraints_mutex); 7583 7584 return ret; 7585} 7586 7587int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) 7588{ 7589 /* Don't accept realtime tasks when there is no way for them to run */ 7590 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) 7591 return 0; 7592 7593 return 1; 7594} 7595 7596#else /* !CONFIG_RT_GROUP_SCHED */ 7597static int sched_rt_global_constraints(void) 7598{ 7599 unsigned long flags; 7600 int i; 7601 7602 if (sysctl_sched_rt_period <= 0) 7603 return -EINVAL; 7604 7605 /* 7606 * There's always some RT tasks in the root group 7607 * -- migration, kstopmachine etc.. 7608 */ 7609 if (sysctl_sched_rt_runtime == 0) 7610 return -EBUSY; 7611 7612 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 7613 for_each_possible_cpu(i) { 7614 struct rt_rq *rt_rq = &cpu_rq(i)->rt; 7615 7616 raw_spin_lock(&rt_rq->rt_runtime_lock); 7617 rt_rq->rt_runtime = global_rt_runtime(); 7618 raw_spin_unlock(&rt_rq->rt_runtime_lock); 7619 } 7620 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 7621 7622 return 0; 7623} 7624#endif /* CONFIG_RT_GROUP_SCHED */ 7625 7626int sched_rt_handler(struct ctl_table *table, int write, 7627 void __user *buffer, size_t *lenp, 7628 loff_t *ppos) 7629{ 7630 int ret; 7631 int old_period, old_runtime; 7632 static DEFINE_MUTEX(mutex); 7633 7634 mutex_lock(&mutex); 7635 old_period = sysctl_sched_rt_period; 7636 old_runtime = sysctl_sched_rt_runtime; 7637 7638 ret = proc_dointvec(table, write, buffer, lenp, ppos); 7639 7640 if (!ret && write) { 7641 ret = sched_rt_global_constraints(); 7642 if (ret) { 7643 sysctl_sched_rt_period = old_period; 7644 sysctl_sched_rt_runtime = old_runtime; 7645 } else { 7646 def_rt_bandwidth.rt_runtime = global_rt_runtime(); 7647 def_rt_bandwidth.rt_period = 7648 ns_to_ktime(global_rt_period()); 7649 } 7650 } 7651 mutex_unlock(&mutex); 7652 7653 return ret; 7654} 7655 7656#ifdef CONFIG_CGROUP_SCHED 7657 7658/* return corresponding task_group object of a cgroup */ 7659static inline struct task_group *cgroup_tg(struct cgroup *cgrp) 7660{ 7661 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id), 7662 struct task_group, css); 7663} 7664 7665static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp) 7666{ 7667 struct task_group *tg, *parent; 7668 7669 if (!cgrp->parent) { 7670 /* This is early initialization for the top cgroup */ 7671 return &root_task_group.css; 7672 } 7673 7674 parent = cgroup_tg(cgrp->parent); 7675 tg = sched_create_group(parent); 7676 if (IS_ERR(tg)) 7677 return ERR_PTR(-ENOMEM); 7678 7679 return &tg->css; 7680} 7681 7682static void cpu_cgroup_destroy(struct cgroup *cgrp) 7683{ 7684 struct task_group *tg = cgroup_tg(cgrp); 7685 7686 sched_destroy_group(tg); 7687} 7688 7689static int cpu_cgroup_can_attach(struct cgroup *cgrp, 7690 struct cgroup_taskset *tset) 7691{ 7692 struct task_struct *task; 7693 7694 cgroup_taskset_for_each(task, cgrp, tset) { 7695#ifdef CONFIG_RT_GROUP_SCHED 7696 if (!sched_rt_can_attach(cgroup_tg(cgrp), task)) 7697 return -EINVAL; 7698#else 7699 /* We don't support RT-tasks being in separate groups */ 7700 if (task->sched_class != &fair_sched_class) 7701 return -EINVAL; 7702#endif 7703 } 7704 return 0; 7705} 7706 7707static void cpu_cgroup_attach(struct cgroup 
*cgrp, 7708 struct cgroup_taskset *tset) 7709{ 7710 struct task_struct *task; 7711 7712 cgroup_taskset_for_each(task, cgrp, tset) 7713 sched_move_task(task); 7714} 7715 7716static void 7717cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp, 7718 struct task_struct *task) 7719{ 7720 /* 7721 * cgroup_exit() is called in the copy_process() failure path. 7722 * Ignore this case since the task hasn't run yet; this avoids 7723 * trying to poke a half-freed task state from generic code. 7724 */ 7725 if (!(task->flags & PF_EXITING)) 7726 return; 7727 7728 sched_move_task(task); 7729} 7730 7731#ifdef CONFIG_FAIR_GROUP_SCHED 7732static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype, 7733 u64 shareval) 7734{ 7735 return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval)); 7736} 7737 7738static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft) 7739{ 7740 struct task_group *tg = cgroup_tg(cgrp); 7741 7742 return (u64) scale_load_down(tg->shares); 7743} 7744 7745#ifdef CONFIG_CFS_BANDWIDTH 7746static DEFINE_MUTEX(cfs_constraints_mutex); 7747 7748const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ 7749const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ 7750 7751static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); 7752 7753static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) 7754{ 7755 int i, ret = 0, runtime_enabled, runtime_was_enabled; 7756 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7757 7758 if (tg == &root_task_group) 7759 return -EINVAL; 7760 7761 /* 7762 * Ensure we have at least some amount of bandwidth every period. This is 7763 * to prevent reaching a state of large arrears when throttled via 7764 * entity_tick() resulting in prolonged exit starvation. 7765 */ 7766 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) 7767 return -EINVAL; 7768 7769 /* 7770 * Likewise, bound things on the other side by preventing insane quota 7771 * periods. This also allows us to normalize in computing quota 7772 * feasibility. 
7773 */ 7774 if (period > max_cfs_quota_period) 7775 return -EINVAL; 7776 7777 mutex_lock(&cfs_constraints_mutex); 7778 ret = __cfs_schedulable(tg, period, quota); 7779 if (ret) 7780 goto out_unlock; 7781 7782 runtime_enabled = quota != RUNTIME_INF; 7783 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 7784 account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled); 7785 raw_spin_lock_irq(&cfs_b->lock); 7786 cfs_b->period = ns_to_ktime(period); 7787 cfs_b->quota = quota; 7788 7789 __refill_cfs_bandwidth_runtime(cfs_b); 7790 /* restart the period timer (if active) to handle new period expiry */ 7791 if (runtime_enabled && cfs_b->timer_active) { 7792 /* force a reprogram */ 7793 cfs_b->timer_active = 0; 7794 __start_cfs_bandwidth(cfs_b); 7795 } 7796 raw_spin_unlock_irq(&cfs_b->lock); 7797 7798 for_each_possible_cpu(i) { 7799 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 7800 struct rq *rq = cfs_rq->rq; 7801 7802 raw_spin_lock_irq(&rq->lock); 7803 cfs_rq->runtime_enabled = runtime_enabled; 7804 cfs_rq->runtime_remaining = 0; 7805 7806 if (cfs_rq->throttled) 7807 unthrottle_cfs_rq(cfs_rq); 7808 raw_spin_unlock_irq(&rq->lock); 7809 } 7810out_unlock: 7811 mutex_unlock(&cfs_constraints_mutex); 7812 7813 return ret; 7814} 7815 7816int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 7817{ 7818 u64 quota, period; 7819 7820 period = ktime_to_ns(tg->cfs_bandwidth.period); 7821 if (cfs_quota_us < 0) 7822 quota = RUNTIME_INF; 7823 else 7824 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 7825 7826 return tg_set_cfs_bandwidth(tg, period, quota); 7827} 7828 7829long tg_get_cfs_quota(struct task_group *tg) 7830{ 7831 u64 quota_us; 7832 7833 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 7834 return -1; 7835 7836 quota_us = tg->cfs_bandwidth.quota; 7837 do_div(quota_us, NSEC_PER_USEC); 7838 7839 return quota_us; 7840} 7841 7842int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 7843{ 7844 u64 quota, period; 7845 7846 period = (u64)cfs_period_us * NSEC_PER_USEC; 7847 quota = tg->cfs_bandwidth.quota; 7848 7849 return tg_set_cfs_bandwidth(tg, period, quota); 7850} 7851 7852long tg_get_cfs_period(struct task_group *tg) 7853{ 7854 u64 cfs_period_us; 7855 7856 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 7857 do_div(cfs_period_us, NSEC_PER_USEC); 7858 7859 return cfs_period_us; 7860} 7861 7862static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft) 7863{ 7864 return tg_get_cfs_quota(cgroup_tg(cgrp)); 7865} 7866 7867static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype, 7868 s64 cfs_quota_us) 7869{ 7870 return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us); 7871} 7872 7873static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft) 7874{ 7875 return tg_get_cfs_period(cgroup_tg(cgrp)); 7876} 7877 7878static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype, 7879 u64 cfs_period_us) 7880{ 7881 return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us); 7882} 7883 7884struct cfs_schedulable_data { 7885 struct task_group *tg; 7886 u64 period, quota; 7887}; 7888 7889/* 7890 * normalize group quota/period to be quota/max_period 7891 * note: units are usecs 7892 */ 7893static u64 normalize_cfs_quota(struct task_group *tg, 7894 struct cfs_schedulable_data *d) 7895{ 7896 u64 quota, period; 7897 7898 if (tg == d->tg) { 7899 period = d->period; 7900 quota = d->quota; 7901 } else { 7902 period = tg_get_cfs_period(tg); 7903 quota = tg_get_cfs_quota(tg); 7904 } 7905 7906 /* note: these should typically be equivalent */ 7907 
if (quota == RUNTIME_INF || quota == -1) 7908 return RUNTIME_INF; 7909 7910 return to_ratio(period, quota); 7911} 7912 7913static int tg_cfs_schedulable_down(struct task_group *tg, void *data) 7914{ 7915 struct cfs_schedulable_data *d = data; 7916 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7917 s64 quota = 0, parent_quota = -1; 7918 7919 if (!tg->parent) { 7920 quota = RUNTIME_INF; 7921 } else { 7922 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; 7923 7924 quota = normalize_cfs_quota(tg, d); 7925 parent_quota = parent_b->hierarchal_quota; 7926 7927 /* 7928 * ensure max(child_quota) <= parent_quota, inherit when no 7929 * limit is set 7930 */ 7931 if (quota == RUNTIME_INF) 7932 quota = parent_quota; 7933 else if (parent_quota != RUNTIME_INF && quota > parent_quota) 7934 return -EINVAL; 7935 } 7936 cfs_b->hierarchal_quota = quota; 7937 7938 return 0; 7939} 7940 7941static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) 7942{ 7943 int ret; 7944 struct cfs_schedulable_data data = { 7945 .tg = tg, 7946 .period = period, 7947 .quota = quota, 7948 }; 7949 7950 if (quota != RUNTIME_INF) { 7951 do_div(data.period, NSEC_PER_USEC); 7952 do_div(data.quota, NSEC_PER_USEC); 7953 } 7954 7955 rcu_read_lock(); 7956 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); 7957 rcu_read_unlock(); 7958 7959 return ret; 7960} 7961 7962static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft, 7963 struct cgroup_map_cb *cb) 7964{ 7965 struct task_group *tg = cgroup_tg(cgrp); 7966 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7967 7968 cb->fill(cb, "nr_periods", cfs_b->nr_periods); 7969 cb->fill(cb, "nr_throttled", cfs_b->nr_throttled); 7970 cb->fill(cb, "throttled_time", cfs_b->throttled_time); 7971 7972 return 0; 7973} 7974#endif /* CONFIG_CFS_BANDWIDTH */ 7975#endif /* CONFIG_FAIR_GROUP_SCHED */ 7976 7977#ifdef CONFIG_RT_GROUP_SCHED 7978static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft, 7979 s64 val) 7980{ 7981 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val); 7982} 7983 7984static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft) 7985{ 7986 return sched_group_rt_runtime(cgroup_tg(cgrp)); 7987} 7988 7989static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype, 7990 u64 rt_period_us) 7991{ 7992 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us); 7993} 7994 7995static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft) 7996{ 7997 return sched_group_rt_period(cgroup_tg(cgrp)); 7998} 7999#endif /* CONFIG_RT_GROUP_SCHED */ 8000 8001static struct cftype cpu_files[] = { 8002#ifdef CONFIG_FAIR_GROUP_SCHED 8003 { 8004 .name = "shares", 8005 .read_u64 = cpu_shares_read_u64, 8006 .write_u64 = cpu_shares_write_u64, 8007 }, 8008#endif 8009#ifdef CONFIG_CFS_BANDWIDTH 8010 { 8011 .name = "cfs_quota_us", 8012 .read_s64 = cpu_cfs_quota_read_s64, 8013 .write_s64 = cpu_cfs_quota_write_s64, 8014 }, 8015 { 8016 .name = "cfs_period_us", 8017 .read_u64 = cpu_cfs_period_read_u64, 8018 .write_u64 = cpu_cfs_period_write_u64, 8019 }, 8020 { 8021 .name = "stat", 8022 .read_map = cpu_stats_show, 8023 }, 8024#endif 8025#ifdef CONFIG_RT_GROUP_SCHED 8026 { 8027 .name = "rt_runtime_us", 8028 .read_s64 = cpu_rt_runtime_read, 8029 .write_s64 = cpu_rt_runtime_write, 8030 }, 8031 { 8032 .name = "rt_period_us", 8033 .read_u64 = cpu_rt_period_read_uint, 8034 .write_u64 = cpu_rt_period_write_uint, 8035 }, 8036#endif 8037 { } /* terminate */ 8038}; 8039 8040struct cgroup_subsys cpu_cgroup_subsys = { 
8041 .name = "cpu", 8042 .create = cpu_cgroup_create, 8043 .destroy = cpu_cgroup_destroy, 8044 .can_attach = cpu_cgroup_can_attach, 8045 .attach = cpu_cgroup_attach, 8046 .exit = cpu_cgroup_exit, 8047 .subsys_id = cpu_cgroup_subsys_id, 8048 .base_cftypes = cpu_files, 8049 .early_init = 1, 8050}; 8051 8052#endif /* CONFIG_CGROUP_SCHED */ 8053 8054#ifdef CONFIG_CGROUP_CPUACCT 8055 8056/* 8057 * CPU accounting code for task groups. 8058 * 8059 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh 8060 * (balbir@in.ibm.com). 8061 */ 8062 8063/* create a new cpu accounting group */ 8064static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp) 8065{ 8066 struct cpuacct *ca; 8067 8068 if (!cgrp->parent) 8069 return &root_cpuacct.css; 8070 8071 ca = kzalloc(sizeof(*ca), GFP_KERNEL); 8072 if (!ca) 8073 goto out; 8074 8075 ca->cpuusage = alloc_percpu(u64); 8076 if (!ca->cpuusage) 8077 goto out_free_ca; 8078 8079 ca->cpustat = alloc_percpu(struct kernel_cpustat); 8080 if (!ca->cpustat) 8081 goto out_free_cpuusage; 8082 8083 return &ca->css; 8084 8085out_free_cpuusage: 8086 free_percpu(ca->cpuusage); 8087out_free_ca: 8088 kfree(ca); 8089out: 8090 return ERR_PTR(-ENOMEM); 8091} 8092 8093/* destroy an existing cpu accounting group */ 8094static void cpuacct_destroy(struct cgroup *cgrp) 8095{ 8096 struct cpuacct *ca = cgroup_ca(cgrp); 8097 8098 free_percpu(ca->cpustat); 8099 free_percpu(ca->cpuusage); 8100 kfree(ca); 8101} 8102 8103static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) 8104{ 8105 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); 8106 u64 data; 8107 8108#ifndef CONFIG_64BIT 8109 /* 8110 * Take rq->lock to make 64-bit read safe on 32-bit platforms. 8111 */ 8112 raw_spin_lock_irq(&cpu_rq(cpu)->lock); 8113 data = *cpuusage; 8114 raw_spin_unlock_irq(&cpu_rq(cpu)->lock); 8115#else 8116 data = *cpuusage; 8117#endif 8118 8119 return data; 8120} 8121 8122static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) 8123{ 8124 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); 8125 8126#ifndef CONFIG_64BIT 8127 /* 8128 * Take rq->lock to make 64-bit write safe on 32-bit platforms. 
8129 */ 8130 raw_spin_lock_irq(&cpu_rq(cpu)->lock); 8131 *cpuusage = val; 8132 raw_spin_unlock_irq(&cpu_rq(cpu)->lock); 8133#else 8134 *cpuusage = val; 8135#endif 8136} 8137 8138/* return total cpu usage (in nanoseconds) of a group */ 8139static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) 8140{ 8141 struct cpuacct *ca = cgroup_ca(cgrp); 8142 u64 totalcpuusage = 0; 8143 int i; 8144 8145 for_each_present_cpu(i) 8146 totalcpuusage += cpuacct_cpuusage_read(ca, i); 8147 8148 return totalcpuusage; 8149} 8150 8151static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype, 8152 u64 reset) 8153{ 8154 struct cpuacct *ca = cgroup_ca(cgrp); 8155 int err = 0; 8156 int i; 8157 8158 if (reset) { 8159 err = -EINVAL; 8160 goto out; 8161 } 8162 8163 for_each_present_cpu(i) 8164 cpuacct_cpuusage_write(ca, i, 0); 8165 8166out: 8167 return err; 8168} 8169 8170static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft, 8171 struct seq_file *m) 8172{ 8173 struct cpuacct *ca = cgroup_ca(cgroup); 8174 u64 percpu; 8175 int i; 8176 8177 for_each_present_cpu(i) { 8178 percpu = cpuacct_cpuusage_read(ca, i); 8179 seq_printf(m, "%llu ", (unsigned long long) percpu); 8180 } 8181 seq_printf(m, "\n"); 8182 return 0; 8183} 8184 8185static const char *cpuacct_stat_desc[] = { 8186 [CPUACCT_STAT_USER] = "user", 8187 [CPUACCT_STAT_SYSTEM] = "system", 8188}; 8189 8190static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft, 8191 struct cgroup_map_cb *cb) 8192{ 8193 struct cpuacct *ca = cgroup_ca(cgrp); 8194 int cpu; 8195 s64 val = 0; 8196 8197 for_each_online_cpu(cpu) { 8198 struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu); 8199 val += kcpustat->cpustat[CPUTIME_USER]; 8200 val += kcpustat->cpustat[CPUTIME_NICE]; 8201 } 8202 val = cputime64_to_clock_t(val); 8203 cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_USER], val); 8204 8205 val = 0; 8206 for_each_online_cpu(cpu) { 8207 struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu); 8208 val += kcpustat->cpustat[CPUTIME_SYSTEM]; 8209 val += kcpustat->cpustat[CPUTIME_IRQ]; 8210 val += kcpustat->cpustat[CPUTIME_SOFTIRQ]; 8211 } 8212 8213 val = cputime64_to_clock_t(val); 8214 cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val); 8215 8216 return 0; 8217} 8218 8219static struct cftype files[] = { 8220 { 8221 .name = "usage", 8222 .read_u64 = cpuusage_read, 8223 .write_u64 = cpuusage_write, 8224 }, 8225 { 8226 .name = "usage_percpu", 8227 .read_seq_string = cpuacct_percpu_seq_read, 8228 }, 8229 { 8230 .name = "stat", 8231 .read_map = cpuacct_stats_show, 8232 }, 8233 { } /* terminate */ 8234}; 8235 8236/* 8237 * charge this task's execution time to its accounting group. 8238 * 8239 * called with rq->lock held. 8240 */ 8241void cpuacct_charge(struct task_struct *tsk, u64 cputime) 8242{ 8243 struct cpuacct *ca; 8244 int cpu; 8245 8246 if (unlikely(!cpuacct_subsys.active)) 8247 return; 8248 8249 cpu = task_cpu(tsk); 8250 8251 rcu_read_lock(); 8252 8253 ca = task_ca(tsk); 8254 8255 for (; ca; ca = parent_ca(ca)) { 8256 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); 8257 *cpuusage += cputime; 8258 } 8259 8260 rcu_read_unlock(); 8261} 8262 8263struct cgroup_subsys cpuacct_subsys = { 8264 .name = "cpuacct", 8265 .create = cpuacct_create, 8266 .destroy = cpuacct_destroy, 8267 .subsys_id = cpuacct_subsys_id, 8268 .base_cftypes = files, 8269}; 8270#endif /* CONFIG_CGROUP_CPUACCT */ 8271
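
The short programs below are illustrative userspace sketches of a few of the algorithms implemented above. They are not part of core.c, they compile on their own with a plain C compiler, and every constant, table and task-group value in them is made up for the demonstration; they only mirror the arithmetic and control flow of the kernel functions they name.

A minimal sketch of the level test in set_domain_attribute(): a requested relax_domain_level switches wake/newidle balancing off for every domain whose level lies above the request. The flag values and the three sample domain levels are illustrative only.

#include <stdio.h>

/* illustrative flag values; the real ones live elsewhere in the kernel headers */
#define SD_BALANCE_NEWIDLE      0x02
#define SD_BALANCE_WAKE         0x10

int main(void)
{
        int request = 1;        /* as if booted with relax_domain_level=1 */
        int level;

        for (level = 0; level < 3; level++) {
                int flags = SD_BALANCE_WAKE | SD_BALANCE_NEWIDLE;

                if (request < level)    /* same test as set_domain_attribute() */
                        flags &= ~(SD_BALANCE_WAKE | SD_BALANCE_NEWIDLE);
                else
                        flags |= SD_BALANCE_WAKE | SD_BALANCE_NEWIDLE;

                printf("domain level %d: wake/newidle balancing %s\n",
                       level, flags ? "on" : "off");
        }
        return 0;
}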
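
A sketch of the unique-distance pass at the top of sched_init_numa(): starting from the local distance node_distance(0, 0), repeatedly pick the smallest larger distance found in row 0 of the distance table, one pick per NUMA level. The 4-node distance table here is invented; the kernel reads the real one via node_distance().

#include <stdio.h>

#define NR_NODES 4

static const int distance[NR_NODES][NR_NODES] = {
        { 10, 20, 20, 30 },
        { 20, 10, 30, 20 },
        { 20, 30, 10, 20 },
        { 30, 20, 20, 10 },
};

static int node_distance(int i, int j)
{
        return distance[i][j];
}

int main(void)
{
        int numa_distance[NR_NODES];
        int curr = node_distance(0, 0);         /* the identity distance */
        int next = curr;
        int level = 0;
        int i, j;

        for (i = 0; i < NR_NODES; i++) {
                /* pick the smallest distance in row 0 that is larger than 'curr' */
                for (j = 0; j < NR_NODES; j++) {
                        int d = node_distance(0, j);

                        if (d > curr && (d < next || next == curr))
                                next = d;
                }
                if (next == curr)
                        break;                  /* no new distance found */

                numa_distance[level++] = next;
                curr = next;
        }

        for (i = 0; i < level; i++)
                printf("level %d: distance %d\n", i, numa_distance[i]);
        return 0;
}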
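
A sketch of the two matching passes in partition_sched_domains(), with cpumasks reduced to plain bitmasks and the new_topology/dattr handling left out: every current domain without an identical counterpart in doms_new[] is detached, and every new domain without an identical counterpart in doms_cur[] is built. The two domain sets are made up.

#include <stdio.h>

static unsigned long doms_cur[] = { 0x0f, 0xf0 };       /* CPUs 0-3, CPUs 4-7 */
static unsigned long doms_new[] = { 0x0f, 0xc0 };       /* CPUs 0-3, CPUs 6-7 */

#define LEN(a) (int)(sizeof(a) / sizeof((a)[0]))

int main(void)
{
        int i, j;

        /* Destroy deleted domains */
        for (i = 0; i < LEN(doms_cur); i++) {
                for (j = 0; j < LEN(doms_new); j++)
                        if (doms_cur[i] == doms_new[j])
                                goto match1;
                printf("detach_destroy_domains(%#lx)\n", doms_cur[i]);
match1:
                ;
        }

        /* Build new domains */
        for (i = 0; i < LEN(doms_new); i++) {
                for (j = 0; j < LEN(doms_cur); j++)
                        if (doms_new[i] == doms_cur[j])
                                goto match2;
                printf("build_sched_domains(%#lx)\n", doms_new[i]);
match2:
                ;
        }
        return 0;
}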
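
The bandwidth split described in the root_task_group comment inside sched_init() (ten weight-1024 tasks plus child groups A0 and A1 of weight 1024 each, all competing at the root cfs_rq) works out as below; the 8.33% figure quoted in that comment is just this ratio.

#include <stdio.h>

int main(void)
{
        unsigned int nice0 = 1024;                      /* weight of a nice-0 task */
        unsigned int total = 10 * nice0 + 1024 + 1024;  /* ten root tasks + A0 + A1 */

        printf("A0's bandwidth = 1024 / %u = %.2f%%\n", total, 100.0 * 1024 / total);
        return 0;
}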
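
A sketch of to_ratio() and of the summation that tg_rt_schedulable() performs over a group's children: runtime per period is expressed as a 20-bit fixed-point fraction, and the children's fractions must not add up to more than the parent's. The parent and child runtimes below are invented.

#include <stdio.h>
#include <stdint.h>

#define RUNTIME_INF ((uint64_t)~0ULL)

static unsigned long to_ratio(uint64_t period, uint64_t runtime)
{
        if (runtime == RUNTIME_INF)
                return 1UL << 20;               /* "100%" in fixed point */

        return (unsigned long)((runtime << 20) / period);
}

int main(void)
{
        /* parent: 950ms runtime per 1s period (the global RT default) */
        unsigned long parent = to_ratio(1000000000ULL, 950000000ULL);

        /* two hypothetical children: 300ms/1s and 500ms/1s */
        unsigned long sum = to_ratio(1000000000ULL, 300000000ULL) +
                            to_ratio(1000000000ULL, 500000000ULL);

        printf("parent ratio %lu, children sum %lu -> %s\n",
               parent, sum, sum > parent ? "-EINVAL" : "schedulable");
        return 0;
}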
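
A sketch of the unit conversion and sanity bounds applied on the tg_set_cfs_quota()/tg_set_cfs_bandwidth() path: a negative quota means "no limit", both quota and period must be at least min_cfs_quota_period (1ms), and the period may not exceed max_cfs_quota_period (1s). The sample (period, quota) pairs are made up, and -22 simply stands in for -EINVAL.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC   1000ULL
#define NSEC_PER_MSEC   1000000ULL
#define NSEC_PER_SEC    1000000000ULL
#define RUNTIME_INF     ((uint64_t)~0ULL)

static const uint64_t min_cfs_quota_period = 1 * NSEC_PER_MSEC;  /* 1ms */
static const uint64_t max_cfs_quota_period = 1 * NSEC_PER_SEC;   /* 1s  */

/* returns 0 if the pair would be accepted, -22 (-EINVAL) otherwise */
static int check_cfs_bandwidth(long period_us, long quota_us)
{
        uint64_t period = (uint64_t)period_us * NSEC_PER_USEC;
        uint64_t quota = quota_us < 0 ? RUNTIME_INF
                                      : (uint64_t)quota_us * NSEC_PER_USEC;

        if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
                return -22;
        if (period > max_cfs_quota_period)
                return -22;
        return 0;
}

int main(void)
{
        printf("100ms period, 25ms quota : %d\n", check_cfs_bandwidth(100000, 25000));
        printf("100ms period, no limit   : %d\n", check_cfs_bandwidth(100000, -1));
        printf("100ms period, 0.5ms quota: %d\n", check_cfs_bandwidth(100000, 500));
        printf("2s period,    1s quota   : %d\n", check_cfs_bandwidth(2000000, 1000000));
        return 0;
}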
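
A sketch of the top-down walk in tg_cfs_schedulable_down(): the root is unlimited, a child with no quota of its own inherits its parent's effective quota, and a child whose quota exceeds the parent's effective quota is rejected. The toy group tree and the quota numbers are invented, and quotas are kept as already-normalized values rather than computed via normalize_cfs_quota().

#include <stdio.h>
#include <stdint.h>

#define RUNTIME_INF ((uint64_t)~0ULL)

struct tg {
        const char *name;
        int parent;             /* index of the parent in the array, -1 for root */
        uint64_t quota;         /* own quota (already normalized), RUNTIME_INF = none */
        uint64_t hierarchical;  /* effective quota filled in by the walk */
};

int main(void)
{
        struct tg tree[] = {
                { "root", -1, RUNTIME_INF, 0 },
                { "A",     0, 500,         0 },
                { "A/a1",  1, RUNTIME_INF, 0 },         /* inherits A's 500 */
                { "A/a2",  1, 800,         0 },         /* exceeds A's 500  */
        };
        int i, n = (int)(sizeof(tree) / sizeof(tree[0]));

        for (i = 0; i < n; i++) {       /* parents precede children: top-down walk */
                uint64_t quota = tree[i].quota;

                if (tree[i].parent < 0) {
                        quota = RUNTIME_INF;            /* the root is never limited */
                } else {
                        uint64_t parent_quota = tree[tree[i].parent].hierarchical;

                        if (quota == RUNTIME_INF) {
                                quota = parent_quota;   /* inherit when no limit set */
                        } else if (parent_quota != RUNTIME_INF && quota > parent_quota) {
                                printf("%s: quota %llu > parent quota %llu -> -EINVAL\n",
                                       tree[i].name,
                                       (unsigned long long)quota,
                                       (unsigned long long)parent_quota);
                                continue;
                        }
                }
                tree[i].hierarchical = quota;
                if (quota == RUNTIME_INF)
                        printf("%s: effective quota unlimited\n", tree[i].name);
                else
                        printf("%s: effective quota %llu\n", tree[i].name,
                               (unsigned long long)quota);
        }
        return 0;
}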