cputime.c revision 1e4dda08b4c39b3d8f4a3ee7269d49e0200c8af8
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"


#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * Another CPU may therefore read this CPU's irq time and race with
 * irq/vtime_account on this CPU. We would then get either the old or
 * the new value, with the side effect of accounting a slice of irq time
 * to the wrong task when an irq is in progress while we read rq->clock.
 * That is a worthy compromise compared to taking locks on each irq in
 * account_system_time().
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 0;
}

#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
        unsigned long flags;
        s64 delta;
        int cpu;

        if (!sched_clock_irqtime)
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();
        delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
        __this_cpu_add(irq_start_time, delta);

        irq_time_write_begin();
        /*
         * We do not account for softirq time from ksoftirqd here.
         * We want to continue accounting softirq time to the ksoftirqd
         * thread in that case, so as not to confuse the scheduler with a
         * special task that does not consume any time but still wants to
         * run.
         */
        if (hardirq_count())
                __this_cpu_add(cpu_hardirq_time, delta);
        else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
                __this_cpu_add(cpu_softirq_time, delta);

        irq_time_write_end();
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);

static int irqtime_account_hi_update(void)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        unsigned long flags;
        u64 latest_ns;
        int ret = 0;

        local_irq_save(flags);
        latest_ns = this_cpu_read(cpu_hardirq_time);
        if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
                ret = 1;
        local_irq_restore(flags);
        return ret;
}

static int irqtime_account_si_update(void)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        unsigned long flags;
        u64 latest_ns;
        int ret = 0;

        local_irq_save(flags);
        latest_ns = this_cpu_read(cpu_softirq_time);
        if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
                ret = 1;
        local_irq_restore(flags);
        return ret;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime     (0)

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
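
/*
 * Illustrative sketch, not part of the original file: on !CONFIG_64BIT
 * kernels a remote reader pairs with irq_time_write_begin()/_end() above
 * by retrying around irq_time_seq, much like the irq_time_read() helper
 * in sched.h. The function name below is hypothetical.
 */
#if 0
static u64 irq_time_read_sketch(int cpu)
{
        u64 irq_time;
        unsigned seq;

        do {
                /* Snapshot both counters; retry if a writer raced us. */
                seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
                irq_time = per_cpu(cpu_softirq_time, cpu) +
                           per_cpu(cpu_hardirq_time, cpu);
        } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

        return irq_time;
}
#endif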
static inline void task_group_account_field(struct task_struct *p, int index,
                                            u64 tmp)
{
        /*
         * Since all updates are sure to touch the root cgroup, we
         * get ourselves ahead and touch it first. If the root cgroup
         * is the only cgroup, then nothing else should be necessary.
         */
        __this_cpu_add(kernel_cpustat.cpustat[index], tmp);

        cpuacct_account_field(p, index, tmp);
}

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
                       cputime_t cputime_scaled)
{
        int index;

        /* Add user time to process. */
        p->utime += cputime;
        p->utimescaled += cputime_scaled;
        account_group_user_time(p, cputime);

        index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

        /* Add user time to cpustat. */
        task_group_account_field(p, index, (__force u64) cputime);

        /* Account for user time used */
        acct_account_cputime(p);
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
                               cputime_t cputime_scaled)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        /* Add guest time to process. */
        p->utime += cputime;
        p->utimescaled += cputime_scaled;
        account_group_user_time(p, cputime);
        p->gtime += cputime;

        /* Add guest time to cpustat. */
        if (task_nice(p) > 0) {
                cpustat[CPUTIME_NICE] += (__force u64) cputime;
                cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
        } else {
                cpustat[CPUTIME_USER] += (__force u64) cputime;
                cpustat[CPUTIME_GUEST] += (__force u64) cputime;
        }
}

/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: index of the cpustat field to update
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
                           cputime_t cputime_scaled, int index)
{
        /* Add system time to process. */
        p->stime += cputime;
        p->stimescaled += cputime_scaled;
        account_group_system_time(p, cputime);

        /* Add system time to cpustat. */
        task_group_account_field(p, index, (__force u64) cputime);

        /* Account for system time used */
        acct_account_cputime(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
                         cputime_t cputime, cputime_t cputime_scaled)
{
        int index;

        if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
                account_guest_time(p, cputime, cputime_scaled);
                return;
        }

        if (hardirq_count() - hardirq_offset)
                index = CPUTIME_IRQ;
        else if (in_serving_softirq())
                index = CPUTIME_SOFTIRQ;
        else
                index = CPUTIME_SYSTEM;

        __account_system_time(p, cputime, cputime_scaled, index);
}
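
/*
 * Worked example (illustration only): account_process_tick() below passes
 * hardirq_offset == HARDIRQ_OFFSET. If the tick interrupts a softirq
 * handler, hardirq_count() == HARDIRQ_OFFSET (only the tick's own hardirq
 * is in progress), so hardirq_count() - hardirq_offset == 0 and
 * in_serving_softirq() routes the time to CPUTIME_SOFTIRQ. If the tick
 * instead nests inside another hardirq, the difference is non-zero and
 * the time is charged to CPUTIME_IRQ.
 */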
/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        struct rq *rq = this_rq();

        if (atomic_read(&rq->nr_iowait) > 0)
                cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
        else
                cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}

static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
        if (static_key_false(&paravirt_steal_enabled)) {
                u64 steal;
                cputime_t steal_ct;

                steal = paravirt_steal_clock(smp_processor_id());
                steal -= this_rq()->prev_steal_time;

                /*
                 * cputime_t may be less precise than nsecs (eg: if it's
                 * based on jiffies). Let's cast the result to cputime
                 * granularity and account the rest on the next rounds.
                 */
                steal_ct = nsecs_to_cputime(steal);
                this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);

                account_steal_time(steal_ct);
                return steal_ct;
        }
#endif
        return false;
}

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
        struct signal_struct *sig = tsk->signal;
        cputime_t utime, stime;
        struct task_struct *t;

        times->utime = sig->utime;
        times->stime = sig->stime;
        times->sum_exec_runtime = sig->sum_sched_runtime;

        rcu_read_lock();
        for_each_thread(tsk, t) {
                task_cputime(t, &utime, &stime);
                times->utime += utime;
                times->stime += stime;
                times->sum_exec_runtime += task_sched_runtime(t);
        }
        rcu_read_unlock();
}
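
/*
 * Usage sketch (illustration only, mirroring callers such as
 * thread_group_cputime_adjusted() below); the function name is
 * hypothetical:
 */
#if 0
static void print_group_cputime_sketch(struct task_struct *tsk)
{
        struct task_cputime times;

        /* Sum the group's raw tick-based times and scheduler runtime. */
        thread_group_cputime(tsk, &times);
        pr_info("%s: utime=%llu stime=%llu sum_exec=%llu\n",
                tsk->comm,
                (unsigned long long)(__force u64)times.utime,
                (unsigned long long)(__force u64)times.stime,
                (unsigned long long)times.sum_exec_runtime);
}
#endif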
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 * @ticks: number of ticks to account
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done for both system and user time because no
 * timer goes off while we are on hardirq, so we might never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time, not on irq or
 * softirq time, as those no longer count in the task's exec_runtime.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                         struct rq *rq, int ticks)
{
        cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
        u64 cputime = (__force u64) cputime_one_jiffy;
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        if (steal_account_process_tick())
                return;

        cputime *= ticks;
        scaled *= ticks;

        if (irqtime_account_hi_update()) {
                cpustat[CPUTIME_IRQ] += cputime;
        } else if (irqtime_account_si_update()) {
                cpustat[CPUTIME_SOFTIRQ] += cputime;
        } else if (this_cpu_ksoftirqd() == p) {
                /*
                 * ksoftirqd time does not get accounted in cpu_softirq_time.
                 * So, we have to handle it separately here.
                 * Also, p->stime needs to be updated for ksoftirqd.
                 */
                __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
        } else if (user_tick) {
                account_user_time(p, cputime, scaled);
        } else if (p == rq->idle) {
                account_idle_time(cputime);
        } else if (p->flags & PF_VCPU) { /* System time or guest time */
                account_guest_time(p, cputime, scaled);
        } else {
                __account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
        }
}

static void irqtime_account_idle_ticks(int ticks)
{
        struct rq *rq = this_rq();

        irqtime_account_process_tick(current, 0, rq, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                                struct rq *rq, int nr_ticks) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
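
/*
 * Worked example (illustration only): with CONFIG_IRQ_TIME_ACCOUNTING
 * enabled, suppose this CPU's cpu_hardirq_time has reached 12ms of
 * sched_clock() time while cpustat[CPUTIME_IRQ] still holds the
 * equivalent of 10ms. irqtime_account_hi_update() then returns 1, so
 * irqtime_account_process_tick() charges the next tick(s) to CPUTIME_IRQ
 * rather than to the interrupted task, until the coarse cpustat counter
 * catches up with the ns-granular per-cpu one.
 */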
/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_common_task_switch(struct task_struct *prev)
{
        if (is_idle_task(prev))
                vtime_account_idle(prev);
        else
                vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        vtime_account_user(prev);
#endif
        arch_vtime_task_switch(prev);
}
#endif

/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * assign another meaning to idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_common_account_irq_enter(struct task_struct *tsk)
{
        if (!in_interrupt()) {
                /*
                 * If we interrupted user, context_tracking_in_user()
                 * is 1 because context tracking doesn't hook
                 * into irq entry/exit. This way we know if
                 * we need to flush user time on kernel entry.
                 */
                if (context_tracking_in_user()) {
                        vtime_account_user(tsk);
                        return;
                }

                if (is_idle_task(tsk)) {
                        vtime_account_idle(tsk);
                        return;
                }
        }
        vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_common_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */


#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        *ut = p->utime;
        *st = p->stime;
}

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);

        *ut = cputime.utime;
        *st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
        cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
        struct rq *rq = this_rq();

        if (vtime_accounting_enabled())
                return;

        if (sched_clock_irqtime) {
                irqtime_account_process_tick(p, user_tick, rq, 1);
                return;
        }

        if (steal_account_process_tick())
                return;

        if (user_tick)
                account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
        else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
                account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
                                    one_jiffy_scaled);
        else
                account_idle_time(cputime_one_jiffy);
}

/*
 * Account multiple ticks of steal time.
 * @ticks: number of stolen ticks
 */
void account_steal_ticks(unsigned long ticks)
{
        account_steal_time(jiffies_to_cputime(ticks));
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of idle ticks
 */
void account_idle_ticks(unsigned long ticks)
{
        if (sched_clock_irqtime) {
                irqtime_account_idle_ticks(ticks);
                return;
        }

        account_idle_time(jiffies_to_cputime(ticks));
}

/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow by
 * losing precision when the numbers are big.
 */
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
        u64 scaled;

        for (;;) {
                /* Make sure "rtime" is the bigger of stime/rtime */
                if (stime > rtime)
                        swap(rtime, stime);

                /* Make sure 'total' fits in 32 bits */
                if (total >> 32)
                        goto drop_precision;

                /* Does rtime (and thus stime) fit in 32 bits? */
                if (!(rtime >> 32))
                        break;

                /* Can we just balance rtime/stime rather than dropping bits? */
                if (stime >> 31)
                        goto drop_precision;

                /* We can grow stime and shrink rtime and try to make them both fit */
                stime <<= 1;
                rtime >>= 1;
                continue;

drop_precision:
                /* We drop from rtime, it has more bits than stime */
                rtime >>= 1;
                total >>= 1;
        }

        /*
         * Make sure gcc understands that this is a 32x32->64 multiply,
         * followed by a 64/32->64 divide.
         */
        scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
        return (__force cputime_t) scaled;
}
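
/*
 * Worked example (illustration only): with stime=600 and utime=300 the
 * total is 900, and suppose the scheduler reports rtime=1000. Then
 * scale_stime(600, 1000, 900) = 600 * 1000 / 900 = 666, and
 * cputime_adjust() below derives utime = 1000 - 666 = 334, preserving the
 * 2:1 stime/utime ratio at rtime scale. When the inputs exceed 32 bits,
 * the loop above first doubles stime and halves rtime (a lossless
 * rebalancing) and only then shifts bits off rtime and total, trading
 * precision for overflow safety.
 */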
/*
 * Adjust tick based cputime random precision against scheduler
 * runtime accounting.
 */
static void cputime_adjust(struct task_cputime *curr,
                           struct cputime *prev,
                           cputime_t *ut, cputime_t *st)
{
        cputime_t rtime, stime, utime;

        /*
         * Tick based cputime accounting depends on whether the timer
         * happens to interrupt a task's timeslices. Depending on these
         * circumstances, the tick counts may over- or under-estimate the
         * real user and system cputime, matching them with variable
         * precision.
         *
         * Fix this by scaling these tick based values against the total
         * runtime accounted by the CFS scheduler.
         */
        rtime = nsecs_to_cputime(curr->sum_exec_runtime);

        /*
         * Update userspace visible utime/stime values only if actual execution
         * time is bigger than already exported. Note that it can happen that we
         * previously provided bigger values due to scaling inaccuracy on big
         * numbers.
         */
        if (prev->stime + prev->utime >= rtime)
                goto out;

        stime = curr->stime;
        utime = curr->utime;

        if (utime == 0) {
                stime = rtime;
        } else if (stime == 0) {
                utime = rtime;
        } else {
                cputime_t total = stime + utime;

                stime = scale_stime((__force u64)stime,
                                    (__force u64)rtime, (__force u64)total);
                utime = rtime - stime;
        }

        /*
         * If the tick based count grows faster than the scheduler one,
         * the result of the scaling may go backward.
         * Let's enforce monotonicity.
         */
        prev->stime = max(prev->stime, stime);
        prev->utime = max(prev->utime, utime);

out:
        *ut = prev->utime;
        *st = prev->stime;
}

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime = {
                .sum_exec_runtime = p->se.sum_exec_runtime,
        };

        task_cputime(p, &cputime.utime, &cputime.stime);
        cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}

/*
 * Must be called with siglock held.
 */
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);
        cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static unsigned long long vtime_delta(struct task_struct *tsk)
{
        unsigned long long clock;

        clock = local_clock();
        if (clock < tsk->vtime_snap)
                return 0;

        return clock - tsk->vtime_snap;
}

static cputime_t get_vtime_delta(struct task_struct *tsk)
{
        unsigned long long delta = vtime_delta(tsk);

        WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
        tsk->vtime_snap += delta;

        /* CHECKME: always safe to convert nsecs to cputime? */
        return nsecs_to_cputime(delta);
}

static void __vtime_account_system(struct task_struct *tsk)
{
        cputime_t delta_cpu = get_vtime_delta(tsk);

        account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
        write_seqlock(&tsk->vtime_seqlock);
        __vtime_account_system(tsk);
        write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_gen_account_irq_exit(struct task_struct *tsk)
{
        write_seqlock(&tsk->vtime_seqlock);
        __vtime_account_system(tsk);
        if (context_tracking_in_user())
                tsk->vtime_snap_whence = VTIME_USER;
        write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_user(struct task_struct *tsk)
{
        cputime_t delta_cpu;

        write_seqlock(&tsk->vtime_seqlock);
        delta_cpu = get_vtime_delta(tsk);
        tsk->vtime_snap_whence = VTIME_SYS;
        account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
        write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_user_enter(struct task_struct *tsk)
{
        write_seqlock(&tsk->vtime_seqlock);
        __vtime_account_system(tsk);
        tsk->vtime_snap_whence = VTIME_USER;
        write_sequnlock(&tsk->vtime_seqlock);
}
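
/*
 * Call-sequence sketch (illustration only): with context tracking enabled,
 * a syscall drives the hooks above roughly as follows:
 *
 *   user mode runs    ... user time accrues since vtime_snap
 *   syscall entry     -> vtime_account_user(): flush the user delta,
 *                        vtime_snap_whence = VTIME_SYS
 *   kernel work runs  ... system time accrues
 *   syscall exit      -> vtime_user_enter(): flush the system delta,
 *                        vtime_snap_whence = VTIME_USER
 */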
void vtime_guest_enter(struct task_struct *tsk)
{
        /*
         * The flags must be updated under the lock with
         * the vtime_snap flush and update.
         * That enforces the right ordering and update sequence
         * synchronization against the reader (task_gtime())
         * that can thus safely catch up with a tickless delta.
         */
        write_seqlock(&tsk->vtime_seqlock);
        __vtime_account_system(tsk);
        current->flags |= PF_VCPU;
        write_sequnlock(&tsk->vtime_seqlock);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
        write_seqlock(&tsk->vtime_seqlock);
        __vtime_account_system(tsk);
        current->flags &= ~PF_VCPU;
        write_sequnlock(&tsk->vtime_seqlock);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);

void vtime_account_idle(struct task_struct *tsk)
{
        cputime_t delta_cpu = get_vtime_delta(tsk);

        account_idle_time(delta_cpu);
}

void arch_vtime_task_switch(struct task_struct *prev)
{
        write_seqlock(&prev->vtime_seqlock);
        prev->vtime_snap_whence = VTIME_SLEEPING;
        write_sequnlock(&prev->vtime_seqlock);

        write_seqlock(&current->vtime_seqlock);
        current->vtime_snap_whence = VTIME_SYS;
        current->vtime_snap = sched_clock_cpu(smp_processor_id());
        write_sequnlock(&current->vtime_seqlock);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
        unsigned long flags;

        write_seqlock_irqsave(&t->vtime_seqlock, flags);
        t->vtime_snap_whence = VTIME_SYS;
        t->vtime_snap = sched_clock_cpu(cpu);
        write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
}

cputime_t task_gtime(struct task_struct *t)
{
        unsigned int seq;
        cputime_t gtime;

        do {
                seq = read_seqbegin(&t->vtime_seqlock);

                gtime = t->gtime;
                if (t->flags & PF_VCPU)
                        gtime += vtime_delta(t);

        } while (read_seqretry(&t->vtime_seqlock, seq));

        return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
                   cputime_t *u_dst, cputime_t *s_dst,
                   cputime_t *u_src, cputime_t *s_src,
                   cputime_t *udelta, cputime_t *sdelta)
{
        unsigned int seq;
        unsigned long long delta;

        do {
                *udelta = 0;
                *sdelta = 0;

                seq = read_seqbegin(&t->vtime_seqlock);

                if (u_dst)
                        *u_dst = *u_src;
                if (s_dst)
                        *s_dst = *s_src;

                /* Task is sleeping, nothing to add */
                if (t->vtime_snap_whence == VTIME_SLEEPING ||
                    is_idle_task(t))
                        continue;

                delta = vtime_delta(t);

                /*
                 * Task runs either in user or kernel space, add pending nohz
                 * time to the right place.
                 */
                if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
                        *udelta = delta;
                } else {
                        if (t->vtime_snap_whence == VTIME_SYS)
                                *sdelta = delta;
                }
        } while (read_seqretry(&t->vtime_seqlock, seq));
}


void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
        cputime_t udelta, sdelta;

        fetch_task_cputime(t, utime, stime, &t->utime,
                           &t->stime, &udelta, &sdelta);
        if (utime)
                *utime += udelta;
        if (stime)
                *stime += sdelta;
}

void task_cputime_scaled(struct task_struct *t,
                         cputime_t *utimescaled, cputime_t *stimescaled)
{
        cputime_t udelta, sdelta;

        fetch_task_cputime(t, utimescaled, stimescaled,
                           &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
        if (utimescaled)
                *utimescaled += cputime_to_scaled(udelta);
        if (stimescaled)
                *stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
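
/*
 * Usage sketch (illustration only): a reader such as procfs obtains
 * nohz-correct times via task_cputime(); either output pointer may be
 * NULL when only one value is needed. The function name below is
 * hypothetical.
 */
#if 0
static void report_task_times_sketch(struct task_struct *t)
{
        cputime_t utime, stime;

        /* Raw fields plus any pending tickless delta, seqlock-consistent. */
        task_cputime(t, &utime, &stime);
        pr_info("%s: utime=%llu stime=%llu\n", t->comm,
                (unsigned long long)(__force u64)utime,
                (unsigned long long)(__force u64)stime);
}
#endif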