hardirq.h revision dde4b2b5f4ed275250488dabdaf282d9c6e7e2b8
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#include <linux/smp_lock.h>
#include <linux/lockdep.h>
#include <asm/hardirq.h>
#include <asm/system.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count can be overridden per architecture, the default is:
 *
 * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
 * - ( bit 28 is the PREEMPT_ACTIVE flag. )
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x0fff0000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8

#ifndef HARDIRQ_BITS
#define HARDIRQ_BITS	12

#ifndef MAX_HARDIRQS_PER_CPU
#define MAX_HARDIRQS_PER_CPU NR_IRQS
#endif

/*
 * The hardirq mask has to be large enough to have space for potentially
 * all IRQ sources in the system nesting on a single CPU.
 */
#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
# error HARDIRQ_BITS is too low!
#endif
#endif

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)

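/*
 * Worked example (illustrative, not part of the original header):
 * with the default layout the shifts are 0, 8 and 16, giving
 * PREEMPT_MASK = 0x000000ff, SOFTIRQ_MASK = 0x0000ff00 and
 * HARDIRQ_MASK = 0x0fff0000.  A raw count decodes field by field;
 * the sample value below means one level of hardirq nesting on top
 * of one preempt_disable():
 *
 *	unsigned int count = HARDIRQ_OFFSET | PREEMPT_OFFSET;	(= 0x00010001)
 *	unsigned int preempt_depth = (count & PREEMPT_MASK) >> PREEMPT_SHIFT;	(= 1)
 *	unsigned int softirq_depth = (count & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT;	(= 0)
 *	unsigned int hardirq_depth = (count & HARDIRQ_MASK) >> HARDIRQ_SHIFT;	(= 1)
 */
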
#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
#error PREEMPT_ACTIVE is too low!
#endif

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 *
 * in_irq()       - hardirq processing is active
 * in_softirq()   - softirq processing is active, or BHs are disabled
 * in_interrupt() - either of the above
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())

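/*
 * Illustration (not part of the original header): one common consumer
 * of these tests is code that must not sleep in interrupt context.
 * kmalloc() and the GFP flags are the standard kernel API; the helper
 * below is hypothetical:
 *
 *	static void *my_alloc_buf(size_t len)
 *	{
 *		return kmalloc(len, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 *	}
 */
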
#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
#else
# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
#endif

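/*
 * Editorial note: in_atomic() only sees what is reflected in
 * preempt_count().  On !CONFIG_PREEMPT kernels spin_lock() does not
 * touch preempt_count(), so in_atomic() cannot detect a spinlock-held
 * region there.  Typical hedged use, with do_slow_path() hypothetical:
 *
 *	if (!in_atomic() && !irqs_disabled())
 *		do_slow_path();		(blocking is allowed on this path)
 */
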
#ifdef CONFIG_PREEMPT
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define preemptible()	0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif

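/*
 * Worked example (editorial): on a CONFIG_PREEMPT kernel, irq_exit()
 * subtracts IRQ_EXIT_OFFSET = HARDIRQ_OFFSET - 1 = 0x10000 - 1.
 * Starting from a count of exactly HARDIRQ_OFFSET, that leaves
 * preempt_count() == 1: the hardirq bits are clear, so pending
 * softirqs may be run, but preemption stays disabled until the final
 * preempt_enable_no_resched() in irq_exit().
 */
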
#ifdef CONFIG_SMP
extern void synchronize_irq(unsigned int irq);
#else
# define synchronize_irq(irq)	barrier()
#endif

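/*
 * Illustration (not part of the original header): synchronize_irq()
 * waits until no handler for the given IRQ is still running on any
 * CPU.  The classic use is in device teardown; dev and stop_hw() are
 * hypothetical:
 *
 *	dev->shutting_down = 1;		(handlers check this flag)
 *	synchronize_irq(dev->irq);	(wait out any in-flight handler)
 *	stop_hw(dev);			(now safe to release shared state)
 */
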
struct task_struct;

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
static inline void account_system_vtime(struct task_struct *tsk)
{
}
#endif

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_system_vtime(current);		\
		sub_preempt_count(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

#define nmi_enter()		do { lockdep_off(); irq_enter(); } while (0)
#define nmi_exit()		do { __irq_exit(); lockdep_on(); } while (0)

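/*
 * Illustration (not part of the original header): a minimal NMI
 * handler skeleton.  lockdep is switched off across the window
 * because lockdep's own bookkeeping is not NMI-safe, and __irq_exit()
 * is used on the way out so no softirqs run at NMI level;
 * do_nmi_work() is hypothetical:
 *
 *	void my_nmi_handler(void)
 *	{
 *		nmi_enter();
 *		do_nmi_work();
 *		nmi_exit();
 *	}
 */
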
#endif /* LINUX_HARDIRQ_H */