tick-broadcast.c revision eaa907c546f76222227dfc41784b22588af1e3d7
/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

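/*
 * tick_broadcast_device is the global event device which provides the
 * tick for all CPUs recorded in tick_broadcast_mask, i.e. CPUs whose
 * local device is non-functional or stops in deep power states
 * (CLOCK_EVT_FEAT_C3STOP). tmpmask is scratch space used under
 * tick_broadcast_lock. tick_broadcast_force is set by a BROADCAST_FORCE
 * request and makes subsequent BROADCAST_OFF requests a no-op.
 */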
static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check whether the device can be utilized as the broadcast device:
 */
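/*
 * A new device replaces the current broadcast device only if it has a
 * strictly higher rating. Devices with CLOCK_EVT_FEAT_C3STOP are never
 * eligible, since the broadcast device must keep running in exactly the
 * deep C-states it is supposed to cover.
 */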
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	if ((tick_broadcast_device.evtdev &&
	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
	     (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	return 1;
}

/*
 * Check whether the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

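/*
 * Install the broadcast callback of a per-cpu device. Note that
 * tick_broadcast may be defined as NULL when the kernel is built without
 * CONFIG_GENERIC_CLOCKEVENTS_BROADCAST (see tick-internal.h), which is
 * why the assignment is checked a second time and err_broadcast() is
 * installed as a last-resort stub.
 */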
static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check whether the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
			tick_broadcast_clear_oneshot(cpu);
		} else {
			tick_device_setup_broadcast_func(dev);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

/*
 * Broadcast the event to the cpus which are set in the mask (the mask
 * is mangled in the process).
 */
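/*
 * The current CPU is serviced by calling its event handler directly
 * rather than sending an IPI to itself; the remaining CPUs are woken
 * via the broadcast function of the first device in the mask.
 */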
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check whether the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	tick_do_broadcast(tmpmask);

	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Set up the next period for devices which do not have
	 * periodic mode. Start from dev->next_event and keep adding
	 * tick_period; if the computed time has already expired,
	 * deliver the broadcast again and retry.
	 * clockevents_program_event() sets dev->next_event only when
	 * the event is really programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
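/*
 * BROADCAST_ON/FORCE add the calling CPU to the broadcast mask and, in
 * periodic mode, shut down its local device. BROADCAST_OFF removes the
 * CPU again, unless a FORCE request is still in effect. The broadcast
 * device itself is started when the mask becomes non-empty and shut
 * down when it becomes empty.
 */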
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (!tick_broadcast_force &&
		    cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}


#ifdef CONFIG_TICK_ONESHOT

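/*
 * tick_broadcast_oneshot_mask: CPUs which are in deep idle and depend
 *	on the broadcast device for their next event.
 * tick_broadcast_pending_mask: CPUs whose expired event is already being
 *	handled by the broadcast IPI, so they must not reprogram their
 *	local device with a stale expiry time.
 * tick_broadcast_force_mask: CPUs which must be woken by the next
 *	broadcast IPI even though they have left the oneshot mask.
 */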
static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;
static cpumask_var_t tick_broadcast_force_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from another CPU is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

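/*
 * Program the broadcast device for the given expiry time and, on
 * success, steer its interrupt to the CPU which owns that next event
 * (only effective for devices with CLOCK_EVT_FEAT_DYNIRQ, see
 * tick_broadcast_set_affinity() above).
 */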
static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				    ktime_t expires, int force)
{
	int ret;

	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	ret = clockevents_program_event(bc, expires, force);
	if (!ret)
		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
	return ret;
}

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
	}
}

/*
 * Handle oneshot mode broadcasting
 */
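/*
 * Scan all CPUs in the oneshot mask: those whose next event has expired
 * are marked pending and woken via tick_do_broadcast(), together with
 * any CPUs in the force mask. The broadcast device is then rearmed for
 * the earliest remaining per-cpu event.
 */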
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Wake up the cpus which have an expired event.
	 */
	tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogramming:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above
		 */
		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	ktime_t now;
	int cpu;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourselves in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event.tv64 <= now.tv64) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
}

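/*
 * Initialize the next_event of all per-cpu devices in @mask to @expires.
 * Used when switching from periodic to oneshot broadcast so that the
 * waiting CPUs have a sensible expiry time for the first oneshot event.
 */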
static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/* Take the do_timer update */
		tick_do_timer_cpu = cpu;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * from programming the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}


/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast mask flag for the dead cpu, but do not
	 * stop the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif

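/*
 * Allocate the cpumasks used by the broadcast code. Marked __init and
 * using GFP_NOWAIT because the allocations happen early in boot, where
 * sleeping is not allowed.
 */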
void __init tick_broadcast_init(void)
{
	alloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	alloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	alloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	alloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	alloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}