hardirq.h revision fbb9ce9530fd9b66096d5187fa6a115d16d9746c
1#ifndef LINUX_HARDIRQ_H
2#define LINUX_HARDIRQ_H
3
4#include <linux/preempt.h>
5#include <linux/smp_lock.h>
6#include <linux/lockdep.h>
7#include <asm/hardirq.h>
8#include <asm/system.h>
9
10/*
11 * We put the hardirq and softirq counter into the preemption
12 * counter. The bitmask has the following meaning:
13 *
14 * - bits 0-7 are the preemption count (max preemption depth: 256)
15 * - bits 8-15 are the softirq count (max # of softirqs: 256)
16 *
17 * The hardirq count can be overridden per architecture, the default is:
18 *
19 * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
20 * - ( bit 28 is the PREEMPT_ACTIVE flag. )
21 *
22 * PREEMPT_MASK: 0x000000ff
23 * SOFTIRQ_MASK: 0x0000ff00
24 * HARDIRQ_MASK: 0x0fff0000
25 */
26#define PREEMPT_BITS	8
27#define SOFTIRQ_BITS	8
28
29#ifndef HARDIRQ_BITS
30#define HARDIRQ_BITS	12
31/*
32 * The hardirq mask has to be large enough to have space for potentially
33 * all IRQ sources in the system nesting on a single CPU.
34 */
35#if (1 << HARDIRQ_BITS) < NR_IRQS
36# error HARDIRQ_BITS is too low!
37#endif
38#endif
39
40#define PREEMPT_SHIFT	0
41#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
42#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
43
44#define __IRQ_MASK(x)	((1UL << (x))-1)
45
46#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
47#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
48#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
49
50#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
51#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
52#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
53
54#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
55#error PREEMPT_ACTIVE is too low!
56#endif
57
58#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
59#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
60#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
61
62/*
63 * Are we doing bottom half or hardware interrupt processing?
64 * Are we in a softirq context? Interrupt context?
65 */
66#define in_irq()		(hardirq_count())
67#define in_softirq()		(softirq_count())
68#define in_interrupt()		(irq_count())
69
70#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
71# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
72#else
73# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
74#endif
75
76#ifdef CONFIG_PREEMPT
77# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
78# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
79#else
80# define preemptible()	0
81# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
82#endif
83
84#ifdef CONFIG_SMP
85extern void synchronize_irq(unsigned int irq);
86#else
87# define synchronize_irq(irq)	barrier()
88#endif
89
90struct task_struct;
91
92#ifndef CONFIG_VIRT_CPU_ACCOUNTING
93static inline void account_system_vtime(struct task_struct *tsk)
94{
95}
96#endif
97
98/*
99 * It is safe to do non-atomic ops on ->hardirq_context,
100 * because NMI handlers may not preempt and the ops are
101 * always balanced, so the interrupted value of ->hardirq_context
102 * will always be restored.
103 */
104#define irq_enter()					\
105	do {						\
106		account_system_vtime(current);		\
107		add_preempt_count(HARDIRQ_OFFSET);	\
108		trace_hardirq_enter();			\
109	} while (0)
110
111/*
112 * Exit irq context without processing softirqs:
113 */
114#define __irq_exit()					\
115	do {						\
116		trace_hardirq_exit();			\
117		account_system_vtime(current);		\
118		sub_preempt_count(HARDIRQ_OFFSET);	\
119	} while (0)
120
121/*
122 * Exit irq context and process softirqs if needed:
123 */
124extern void irq_exit(void);
125
126#define nmi_enter()		do { lockdep_off(); irq_enter(); } while (0)
127#define nmi_exit()		do { __irq_exit(); lockdep_on(); } while (0)
128
129#endif /* LINUX_HARDIRQ_H */
130