tick-broadcast.c revision 1f73a9806bdd07a5106409bbcab3884078bd34fe
/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check if the device can be utilized as broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
					struct clock_event_device *newdev)
{
	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
		return false;

	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
	    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
		return false;

	return !curdev || newdev->rating > curdev->rating;
}

/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if (!tick_check_broadcast_device(cur, dev))
		return;

	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(cur, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}
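
/*
 * Illustrative sketch (not part of the original file; the device name,
 * frequency and delta values are hypothetical, loosely modeled on an
 * HPET-style global timer): a broadcast candidate is an ordinary
 * clock_event_device without CLOCK_EVT_FEAT_C3STOP, registered by its
 * driver:
 *
 *	static struct clock_event_device global_timer = {
 *		.name		= "global-timer",
 *		.features	= CLOCK_EVT_FEAT_PERIODIC |
 *				  CLOCK_EVT_FEAT_ONESHOT,
 *		.rating		= 50,
 *	};
 *
 *	clockevents_config_and_register(&global_timer, 14318180, 0xf,
 *					0x7fffffff);
 *
 * The clockevents core hands every newly registered device to
 * tick_install_broadcast_device(), and tick_check_broadcast_device()
 * above decides by feature bits and rating whether it replaces the
 * current broadcast device.
 */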

/*
 * Check if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check if the device is dysfunctional and a placeholder which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
			tick_broadcast_clear_oneshot(cpu);
		} else {
			tick_device_setup_broadcast_func(dev);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif
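
/*
 * Illustrative sketch (an assumption modeled on how an architecture
 * wires up the broadcast IPI): tick_receive_broadcast() is meant to
 * be called from the arch's timer-IPI handler and simply runs the
 * event handler of the CPU-local tick device, as if the stopped
 * local timer had fired:
 *
 *	void handle_timer_ipi(void)		(hypothetical arch hook)
 *	{
 *		irq_enter();
 *		tick_receive_broadcast();
 *		irq_exit();
 *	}
 */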

/*
 * Broadcast the event to the cpus which are set in the mask (the mask
 * is modified by this function).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use that
		 * of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic).
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	tick_do_broadcast(tmpmask);

	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Set up the next period for devices which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}
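
/*
 * Worked example for the catch-up loop above (the numbers are
 * illustrative): assume tick_period is 10 ms and the handler runs
 * 25 ms after dev->next_event. clockevents_program_event() with
 * force == false refuses to program an already expired time and
 * returns -ETIME, so the first two iterations (+10 ms, +20 ms) fail
 * and the missed ticks are broadcast by hand; the third iteration
 * programs next_event + 30 ms successfully, returns 0 and the loop
 * exits.
 */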

/*
 * Powerstate information: The system enters/leaves a state where the
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (!tick_broadcast_force &&
		    cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state where the
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}
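
/*
 * Illustrative sketch (an assumption modeled on the ACPI processor
 * idle driver of this era): a driver whose deep C-states stop the
 * local APIC timer switches a CPU into broadcast mode through
 * clockevents_notify(), which ends up in tick_broadcast_on_off():
 *
 *	int cpu = pr->id;			(hypothetical CPU id)
 *
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
 *	...
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_OFF, &cpu);
 *
 * BROADCAST_FORCE acts like BROADCAST_ON but pins broadcast mode, so
 * a later BROADCAST_OFF is ignored (see tick_broadcast_force above).
 */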

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}


#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;
static cpumask_var_t tick_broadcast_force_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}
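
/*
 * Illustrative sketch (an assumption modeled on the x86 idle loop use
 * of this helper): the idle loop polls it with interrupts disabled
 * and skips the deep-idle entry when the broadcast IPI is already on
 * its way:
 *
 *	local_irq_disable();
 *	if (tick_check_broadcast_expired()) {
 *		local_irq_enable();
 *		continue;
 *	}
 *	arch_enter_idle();			(hypothetical)
 */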

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				    ktime_t expires, int force)
{
	int ret;

	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	ret = clockevents_program_event(bc, expires, force);
	if (!ret)
		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
	return ret;
}

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		/*
		 * We might be in the middle of switching over from
		 * periodic to oneshot. If the CPU has not yet
		 * switched over, leave the device alone.
		 */
		if (td->mode == TICKDEV_MODE_ONESHOT) {
			clockevents_set_mode(td->evtdev,
					     CLOCK_EVT_MODE_ONESHOT);
		}
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wake up the cpus which have an expired event.
	 */
	tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogramming:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If the event has already
		 * expired, repeat the above
		 */
		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}
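
/*
 * Illustrative note on CLOCK_EVT_FEAT_DYNIRQ (a sketch; sys_timer and
 * the irq number are hypothetical, other fields omitted): a broadcast
 * device declaring
 *
 *	static struct clock_event_device sys_timer = {
 *		.name		= "sys-timer",
 *		.features	= CLOCK_EVT_FEAT_ONESHOT |
 *				  CLOCK_EVT_FEAT_DYNIRQ,
 *		.irq		= 27,
 *	};
 *
 * allows tick_broadcast_set_affinity() above to steer its interrupt
 * to the CPU with the earliest next_event (the next_cpu computed in
 * tick_handle_oneshot_broadcast()), so the wakeup is delivered to the
 * right CPU directly instead of kicking it with an extra cross-CPU
 * IPI.
 */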

/*
 * Powerstate information: The system enters/leaves a state where the
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	ktime_t now;
	int cpu;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourselves in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
					       tick_broadcast_pending_mask))
				goto out;

			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event.tv64 <= now.tv64) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
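
/*
 * Illustrative sketch (an assumption modeled on cpuidle drivers of
 * this era, e.g. intel_idle): the ENTER/EXIT notifications bracket
 * the actual low-power entry and are routed to
 * tick_broadcast_oneshot_control() via clockevents_notify():
 *
 *	int cpu = smp_processor_id();
 *
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 *	enter_deep_cstate();			(hypothetical helper)
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
 *
 * ENTER parks the CPU-local device and arms the broadcast device if
 * the local event is due earlier; EXIT re-arms the local device or,
 * if its event already expired, sets the force bit so the broadcast
 * handler delivers it (see the ping-pong discussion above).
 */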

/*
 * Reset the oneshot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/* Take the do_timer update */
		if (!tick_nohz_full_cpu(cpu))
			tick_do_timer_cpu = cpu;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * CPU switched over from programming the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}


/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast masks for the dead cpu, but do not stop
	 * the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif

void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}
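
/*
 * Context note (an assumption based on the boot code of this era):
 * tick_broadcast_init() is invoked from tick_init() in
 * kernel/time/tick-common.c, which start_kernel() calls with
 * interrupts still disabled, so GFP_NOWAIT is used because the
 * cpumask allocations must not sleep:
 *
 *	void __init tick_init(void)
 *	{
 *		tick_broadcast_init();
 *	}
 */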