/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <trace/events/irq.h>

#include "internals.h"

/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
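/*
 * Illustrative sketch, not part of the original file: a minimal,
 * hypothetical irq_chip and how irq_set_chip() is typically called
 * while an interrupt controller driver sets up its interrupts. All
 * my_* names are placeholders, not real kernel symbols.
 *
 *	static void __iomem *my_base;	 hypothetical register block
 *
 *	static void my_mask(struct irq_data *d)
 *	{
 *		writel(BIT(d->hwirq), my_base + 0x10);	 MASK_SET reg
 *	}
 *
 *	static void my_unmask(struct irq_data *d)
 *	{
 *		writel(BIT(d->hwirq), my_base + 0x14);	 MASK_CLR reg
 *	}
 *
 *	static struct irq_chip my_chip = {
 *		.name		= "MYCHIP",
 *		.irq_mask	= my_mask,
 *		.irq_unmask	= my_unmask,
 *	};
 *
 *	irq_set_chip(irq, &my_chip);
 */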

/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
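/*
 * Illustrative usage, not part of the original file: a driver whose
 * device signals on the rising edge would typically configure the
 * line before (or while) requesting it:
 *
 *	ret = irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 *	if (ret)
 *		pr_err("irq %u: cannot set trigger type\n", irq);
 */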

/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);
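/*
 * Illustrative sketch with hypothetical names: handler data is commonly
 * used to hand a per-controller cookie to a chained flow handler, which
 * fetches it back with irq_get_handler_data():
 *
 *	irq_set_handler_data(parent_irq, my_gc);
 *	irq_set_chained_handler(parent_irq, my_demux_handler);
 *
 *	static void my_demux_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		struct my_gpio_chip *gc = irq_get_handler_data(irq);
 *		...
 *	}
 */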

/**
 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *	@irq_base:	Interrupt number base
 *	@irq_offset:	Interrupt number offset
 *	@entry:		Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
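/*
 * Illustrative sketch with hypothetical names: chip_data usually points
 * at per-controller state so that irq_chip callbacks can reach their
 * registers via irq_data_get_irq_chip_data():
 *
 *	irq_set_chip_data(irq, my_ctrl);
 *
 *	static void my_mask(struct irq_data *d)
 *	{
 *		struct my_ctrl *ctrl = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), ctrl->base + MY_MASK_SET);
 *	}
 */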

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}
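/*
 * Illustrative note, not part of the original file: whether the lazy
 * path is taken depends solely on the chip. A hypothetical chip that
 * provides only .irq_mask/.irq_unmask gets the lazy behaviour, i.e.
 * disable_irq() merely marks the line disabled and the mask callback
 * is only invoked later from the flow handler if an interrupt actually
 * fires. A chip which cannot tolerate that late hardware access
 * supplies an .irq_disable callback and is masked right here instead.
 */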

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (chip->irq_unmask) {
		chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/*
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
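/*
 * Illustrative sketch with hypothetical names: an I2C/SPI expander style
 * driver marks its child interrupts as nested and calls
 * handle_nested_irq() from the threaded handler of its parent line:
 *
 *	per child irq, at setup time:
 *		irq_set_chip_data(child_irq, my_priv);
 *		irq_set_chip(child_irq, &my_child_chip);
 *		irq_set_nested_thread(child_irq, true);
 *
 *	static irqreturn_t my_parent_thread(int irq, void *data)
 *	{
 *		struct my_priv *p = data;
 *		unsigned long pending = my_read_status(p);	 assumed helper
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, p->nr_children)
 *			handle_nested_irq(p->irq_base + bit);
 *		return IRQ_HANDLED;
 *	}
 */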

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
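/*
 * Illustrative sketch with hypothetical names: a demultiplexing handler
 * typically maps its sub-interrupts to handle_simple_irq() and kicks
 * them via generic_handle_irq(), doing the hardware ack/mask work
 * itself as the note above requires:
 *
 *	irq_set_chip_and_handler(child_irq, &dummy_irq_chip, handle_simple_irq);
 *
 *	static void my_demux(unsigned int irq, struct irq_desc *desc)
 *	{
 *		unsigned long pending = my_read_and_ack_status();	 assumed helper
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, MY_NR_CHILDREN)
 *			generic_handle_irq(my_irq_base + bit);
 *	}
 */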

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require masking the interrupt and
 *	unmasking it after the associated handler has acknowledged the
 *	device, so that the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
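/*
 * Illustrative usage with a hypothetical chip: a controller with level
 * triggered lines that provides mask/unmask (and optionally ack)
 * callbacks is wired up as
 *
 *	irq_set_chip_and_handler(irq, &my_level_chip, handle_level_irq);
 *
 * after which handle_level_irq() performs the mask_ack/handle/unmask
 * sequence above for every interrupt on that line.
 */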

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
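/*
 * Illustrative usage with a hypothetical chip: a controller that only
 * needs an end-of-interrupt notification provides .irq_eoi, which this
 * handler requires, and is wired up as
 *
 *	static struct irq_chip my_eoi_chip = {
 *		.name		= "MY-EOI",
 *		.irq_mask	= my_mask,
 *		.irq_unmask	= my_unmask,
 *		.irq_eoi	= my_eoi,
 *	};
 *
 *	irq_set_chip_and_handler(irq, &my_eoi_chip, handle_fasteoi_irq);
 */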

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	The interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be re-enabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires re-enabling the interrupt inside
 *	the loop which handles the interrupts that arrived while the
 *	handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available, then mask it and
	 * get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
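/*
 * Illustrative usage with a hypothetical chip: edge handling requires an
 * .irq_ack callback so the latched edge can be cleared before the event
 * handler runs:
 *
 *	static struct irq_chip my_edge_chip = {
 *		.name		= "MY-EDGE",
 *		.irq_ack	= my_ack,
 *		.irq_mask	= my_mask,
 *		.irq_unmask	= my_unmask,
 *	};
 *
 *	irq_set_chip_and_handler(irq, &my_edge_chip, handle_edge_irq);
 */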

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq() above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available, then get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
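/*
 * Illustrative usage with hypothetical names: per cpu device id
 * interrupts are requested once with request_percpu_irq() and then
 * enabled on each CPU, as ARM timer/PMU style drivers do:
 *
 *	static DEFINE_PER_CPU(struct my_dev, my_devs);
 *
 *	err = request_percpu_irq(irq, my_handler, "my-timer", &my_devs);
 *
 *	on each CPU that should receive it:
 *		enable_percpu_irq(irq, IRQ_TYPE_NONE);
 *
 * The irq must have been marked with irq_set_percpu_devid() by the
 * interrupt controller code beforehand.
 */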

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
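/*
 * Illustrative usage, not part of the original file: irqchip code often
 * adjusts the settings bits of a freshly mapped interrupt, e.g.
 *
 *	irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE);
 *
 * which clears IRQ_NOREQUEST and sets IRQ_NOPROBE. The
 * irq_set_status_flags()/irq_clear_status_flags() helpers in
 * <linux/irq.h> are thin wrappers around this function.
 */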

/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}