hardirq.h revision 2a7b8df04c11a70105c1abe67d006455d3bdc944
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#include <linux/smp_lock.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <asm/hardirq.h>
#include <asm/system.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be as large as NR_IRQS.
 * In reality, the number of nested IRQs is also limited by the
 * stack size. For archs with over 1000 IRQs it is not practical
 * to expect that they will all nest. We give a maximum of 10 bits
 * for hardirq nesting. An arch may choose to give less than 10
 * bits; m68k expects it to be 8.
 *
 * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
 * - bit 26 is the NMI_MASK
 * - bit 28 is the PREEMPT_ACTIVE flag
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x03ff0000
 *     NMI_MASK: 0x04000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define NMI_BITS	1

#define MAX_HARDIRQ_BITS 10

#ifndef HARDIRQ_BITS
# define HARDIRQ_BITS	MAX_HARDIRQ_BITS
#endif

#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
#error HARDIRQ_BITS too high!
#endif

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

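/*
 * Worked example (illustrative only, not part of this header): with the
 * layout above, a preempt_count() of 0x00010001 decodes to one level of
 * hardirq nesting (0x00010000 & HARDIRQ_MASK) plus one outstanding
 * preempt_disable() (0x00000001 & PREEMPT_MASK); the softirq and NMI
 * bits are clear. A hypothetical helper to extract the hardirq depth:
 */
#if 0	/* sketch only, not compiled */
static inline unsigned int example_hardirq_depth(unsigned int count)
{
	/* Mask out the hardirq field and shift it down to a plain count. */
	return (count & HARDIRQ_MASK) >> HARDIRQ_SHIFT;
}
#endif
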
#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
#error PREEMPT_ACTIVE is too low!
#endif

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 * in_irq()       - we're in (hard) irq context
 * in_softirq()   - we're processing a softirq or have bottom halves disabled
 * in_interrupt() - we're in either of the above (or in NMI context)
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())

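/*
 * Usage sketch (hypothetical driver code, assumes <linux/slab.h>): a
 * common pattern of this era is to let the context decide whether an
 * allocation may sleep; GFP_ATOMIC must be used where sleeping is
 * forbidden.
 */
#if 0	/* sketch only, not compiled */
static void *example_alloc(size_t len)
{
	/* Never sleep when called from interrupt context. */
	return kmalloc(len, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}
#endif
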
/*
 * Are we in NMI context?
 */
#define in_nmi()	(preempt_count() & NMI_MASK)

#if defined(CONFIG_PREEMPT)
# define PREEMPT_INATOMIC_BASE kernel_locked()
# define PREEMPT_CHECK_OFFSET 1
#else
# define PREEMPT_INATOMIC_BASE 0
# define PREEMPT_CHECK_OFFSET 0
#endif

/*
 * Are we running in atomic context?  WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels.  Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_INATOMIC_BASE)

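/*
 * Sketch of the failure mode warned about above (hypothetical code,
 * assumes <linux/spinlock.h> and <linux/delay.h>): on a non-preemptible
 * kernel spin_lock() does not touch preempt_count(), so in_atomic()
 * yields a false negative while the lock is held.
 */
#if 0	/* sketch only, not compiled */
static void example_wrong(spinlock_t *lock)
{
	spin_lock(lock);
	if (!in_atomic())	/* wrongly 0 on !CONFIG_PREEMPT */
		msleep(1);	/* sleeping with a spinlock held: a bug */
	spin_unlock(lock);
}
#endif
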
/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler, *after* releasing the kernel lock)
 */
#define in_atomic_preempt_off() \
		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)

#ifdef CONFIG_PREEMPT
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define preemptible()	0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif

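/*
 * Note on IRQ_EXIT_OFFSET (behavior lives in kernel/softirq.c, not
 * here): with CONFIG_PREEMPT, irq_exit() subtracts HARDIRQ_OFFSET-1,
 * leaving preempt_count() elevated by one while pending softirqs run;
 * the final preempt_enable_no_resched() in irq_exit() drops the
 * remaining count.
 */
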
#ifdef CONFIG_SMP
extern void synchronize_irq(unsigned int irq);
#else
# define synchronize_irq(irq)	barrier()
#endif

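/*
 * Usage sketch (hypothetical driver teardown; example_dev and its
 * fields are made up): synchronize_irq() waits until no handler for
 * the given irq is still running on any CPU, after which it is safe
 * to free data the handler might touch.
 */
#if 0	/* sketch only, not compiled */
static void example_shutdown(struct example_dev *dev)
{
	dev->shutting_down = 1;		/* handler checks this and bails out */
	synchronize_irq(dev->irq);	/* wait out any in-flight handler */
	kfree(dev->dma_buf);		/* now safe: no handler can use it */
}
#endif
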
struct task_struct;

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
/* No-op stub when the arch does not do precise (virtual) CPU accounting. */
static inline void account_system_vtime(struct task_struct *tsk)
{
}
#endif

#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#else
# define rcu_irq_enter() do { } while (0)
# define rcu_irq_exit() do { } while (0)
# define rcu_nmi_enter() do { } while (0)
# define rcu_nmi_exit() do { } while (0)
#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_system_vtime(current);		\
		add_preempt_count(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)

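/*
 * Accounting note: each __irq_enter() adds HARDIRQ_OFFSET, so
 * hardirq_count() / HARDIRQ_OFFSET is the current hardirq nesting
 * depth, and in_irq() is non-zero whenever that depth is at least one.
 */
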
/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_system_vtime(current);		\
		sub_preempt_count(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

#define nmi_enter()						\
	do {							\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
		lockdep_off();					\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)

#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		lockdep_on();					\
		BUG_ON(!in_nmi());				\
		sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
	} while (0)

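/*
 * Usage sketch (hypothetical arch entry code; example_do_nmi is made
 * up): an NMI path brackets its handler with nmi_enter()/nmi_exit() so
 * that in_nmi()/in_interrupt() report correctly and lockdep and RCU
 * are told about the context. NMI handlers must never sleep.
 */
#if 0	/* sketch only, not compiled */
void example_do_nmi(struct pt_regs *regs)
{
	nmi_enter();
	/* ... inspect regs, handle the NMI ... */
	nmi_exit();
}
#endif
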
#endif /* LINUX_HARDIRQ_H */