tick-broadcast.c revision 18de5bc4c1f1f1fa5e14f354a7603bd6e9d4e3b6

/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

struct tick_device tick_broadcast_device;
static cpumask_t tick_broadcast_mask;
static DEFINE_SPINLOCK(tick_broadcast_lock);

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

cpumask_t *tick_get_broadcast_mask(void)
{
	return &tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check if the device can be utilized as broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	if (tick_broadcast_device.evtdev ||
	    (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(NULL, dev);
	tick_broadcast_device.evtdev = dev;
	if (!cpus_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	return 1;
}

/*
 * Check if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

/*
 * Check if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		cpu_set(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	}

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
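
/*
 * Illustrative sketch (not part of this file, names taken loosely from
 * the i386 local APIC driver of this era): a per cpu clock event device
 * which stops in deep C-states announces that with the C3STOP feature
 * bit when it registers, roughly:
 *
 *	static struct clock_event_device lapic_clockevent = {
 *		.name		= "lapic",
 *		.features	= CLOCK_EVT_FEAT_PERIODIC |
 *				  CLOCK_EVT_FEAT_ONESHOT |
 *				  CLOCK_EVT_FEAT_C3STOP,
 *		...
 *	};
 *	clockevents_register_device(&lapic_clockevent);
 *
 * tick_check_broadcast_device() above never selects such a device as
 * the broadcast source; a device which keeps running in C3 (PIT, HPET)
 * takes that role instead.
 */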

/*
 * Broadcast the event to the cpus, which are set in the mask
 */
int tick_do_broadcast(cpumask_t mask)
{
	int ret = 0, cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check if the current cpu is in the mask
	 */
	if (cpu_isset(cpu, mask)) {
		cpu_clear(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
		ret = 1;
	}

	if (!cpus_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * broadcast function of the first device. This works as long
		 * as we have this misfeature only on x86 (lapic).
		 */
		cpu = first_cpu(mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->broadcast(mask);
		ret = 1;
	}
	return ret;
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	cpumask_t mask;

	spin_lock(&tick_broadcast_lock);

	cpus_and(mask, cpu_online_map, tick_broadcast_mask);
	tick_do_broadcast(mask);

	spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	dev->next_event.tv64 = KTIME_MAX;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices which do not have
	 * periodic mode:
	 */
	for (;;) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		tick_do_periodic_broadcast();
	}
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(void *why)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags, *reason = why;
	int cpu;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device in broadcast mode forever or is it not
	 * affected by the powerstate?
	 */
	if (!dev || !tick_device_is_functional(dev) ||
	    !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_ON) {
		if (!cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_set(cpu, tick_broadcast_mask);
			if (td->mode == TICKDEV_MODE_PERIODIC)
				clockevents_set_mode(dev,
						     CLOCK_EVT_MODE_SHUTDOWN);
		}
	} else {
		if (cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_clear(cpu, tick_broadcast_mask);
			if (td->mode == TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
	}

	if (cpus_empty(tick_broadcast_mask))
		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	else {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
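
/*
 * Illustrative note (simplified, not part of this file): the switch
 * above is not called directly by its users. Code which knows that a
 * cpu's local timer is going to stop in a deep idle state goes through
 * the clockevents notification machinery, roughly:
 *
 *	int cpu = smp_processor_id();
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
 *
 * which ends up in tick_broadcast_on_off() below.
 */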

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	int cpu = get_cpu();

	if (!cpu_isset(*oncpu, cpu_online_map)) {
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	} else {
		if (cpu == *oncpu)
			tick_do_broadcast_on_off(&reason);
		else
			smp_call_function_single(*oncpu,
						 tick_do_broadcast_on_off,
						 &reason, 1, 1);
	}
	put_cpu();
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpu_clear(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpus_empty(tick_broadcast_mask))
			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	}

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Shut the broadcast device down on system suspend:
 */
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Resume the broadcast device on system resume. Returns whether the
 * current cpu is in one of the broadcast masks, i.e. gets its events
 * from the broadcast device.
 */
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpus_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpu_isset(smp_processor_id(),
					      tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}


#ifdef CONFIG_TICK_ONESHOT

static cpumask_t tick_broadcast_oneshot_mask;

/*
 * Debugging: see timer_list.c
 */
cpumask_t *tick_get_broadcast_oneshot_mask(void)
{
	return &tick_broadcast_oneshot_mask;
}

/*
 * Program the broadcast device for @expires. When @force is set, retry
 * with the expiry time pushed out by min_delta_ns until programming
 * succeeds.
 */
static int tick_broadcast_set_event(ktime_t expires, int force)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	ktime_t now = ktime_get();
	int res;

	for (;;) {
		res = clockevents_program_event(bc, expires, now);
		if (!res || !force)
			return res;
		now = ktime_get();
		expires = ktime_add(now, ktime_set(0, bc->min_delta_ns));
	}
}

/*
 * Called from tick_resume_broadcast(): reactivate the broadcast device
 * in oneshot mode and kick it when cpus are waiting for broadcast events.
 */
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	if (!cpus_empty(tick_broadcast_oneshot_mask))
		tick_broadcast_set_event(ktime_get(), 1);

	return cpu_isset(smp_processor_id(), tick_broadcast_oneshot_mask);
}
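
/*
 * Illustrative note (simplified, not part of this file): in oneshot
 * (dyntick/highres) mode the idle path brackets a deep C-state with
 *
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 *	... enter the C-state ...
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
 *
 * which is routed to tick_broadcast_oneshot_control() further down.
 * The functions below keep the broadcast device programmed to the
 * earliest next_event of all cpus in tick_broadcast_oneshot_mask.
 */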

/*
 * Reprogram the broadcast device:
 *
 * Called with tick_broadcast_lock held and interrupts disabled.
 */
static int tick_broadcast_reprogram(void)
{
	ktime_t expires = { .tv64 = KTIME_MAX };
	struct tick_device *td;
	int cpu;

	/*
	 * Find the event which expires next:
	 */
	for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
	     cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 < expires.tv64)
			expires = td->evtdev->next_event;
	}

	if (expires.tv64 == KTIME_MAX)
		return 0;

	return tick_broadcast_set_event(expires, 0);
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	cpumask_t mask;
	ktime_t now;
	int cpu;

	spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	mask = CPU_MASK_NONE;
	now = ktime_get();
	/* Find all expired events */
	for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
	     cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64)
			cpu_set(cpu, mask);
	}

	/*
	 * Wake up the cpus which have an expired event. The broadcast
	 * device is reprogrammed in the return from idle code.
	 */
	if (!tick_do_broadcast(mask)) {
		/*
		 * The global event did not expire any CPU local
		 * events. This happens in dyntick mode, as the
		 * maximum PIT delta is quite small.
		 */
		if (tick_broadcast_reprogram())
			goto again;
	}
	spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		goto out;

	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
			cpu_set(cpu, tick_broadcast_oneshot_mask);
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			if (dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(dev->next_event, 1);
		}
	} else {
		if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
			cpu_clear(cpu, tick_broadcast_oneshot_mask);
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 != KTIME_MAX)
				tick_program_event(dev->next_event, 1);
		}
	}

out:
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device for oneshot mode
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	if (bc->mode != CLOCK_EVT_MODE_ONESHOT) {
		bc->event_handler = tick_handle_oneshot_broadcast;
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
		bc->next_event.tv64 = KTIME_MAX;
	}
}
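
/*
 * Illustrative note (assumption about the caller, not part of this
 * file): the mode switch below is driven from the cpu local tick code
 * when it changes over to oneshot mode (highres or dyntick), roughly
 *
 *	tick_switch_to_oneshot(handler)
 *		-> tick_broadcast_switch_to_oneshot()
 *
 * so the broadcast device always follows the operating mode of the
 * per cpu devices.
 */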

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpu_clear(cpu, tick_broadcast_oneshot_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT) {
		if (bc && cpus_empty(tick_broadcast_oneshot_mask))
			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	}

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#endif