hardirq.h revision bcdb714c8856c76383ca455294f0074168705eab
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#ifdef CONFIG_PREEMPT
#include <linux/smp_lock.h>
#endif
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <asm/hardirq.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count can in theory reach the same as NR_IRQS.
 * In reality, the number of nested IRQS is limited to the stack
 * size as well. For archs with over 1000 IRQS it is not practical
 * to expect that they will all nest. We give a max of 10 bits for
 * hardirq nesting. An arch may choose to give less than 10 bits.
 * m68k expects it to be 8.
 *
 * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
 * - bit 26 is the NMI_MASK
 * - bit 28 is the PREEMPT_ACTIVE flag
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x03ff0000
 *     NMI_MASK: 0x04000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define NMI_BITS	1

#define MAX_HARDIRQ_BITS 10

#ifndef HARDIRQ_BITS
# define HARDIRQ_BITS	MAX_HARDIRQ_BITS
#endif

#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
#error HARDIRQ_BITS too high!
#endif

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

#ifndef PREEMPT_ACTIVE
#define PREEMPT_ACTIVE_BITS	1
#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
#define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
#endif

#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
#error PREEMPT_ACTIVE is too low!
#endif

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 * Are we in a softirq context? Interrupt context?
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())

/*
 * Are we in NMI context?
 */
#define in_nmi()	(preempt_count() & NMI_MASK)

#if defined(CONFIG_PREEMPT)
# define PREEMPT_INATOMIC_BASE kernel_locked()
# define PREEMPT_CHECK_OFFSET 1
#else
# define PREEMPT_INATOMIC_BASE 0
# define PREEMPT_CHECK_OFFSET 0
#endif

/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_INATOMIC_BASE)

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler, *after* releasing the kernel lock)
 */
#define in_atomic_preempt_off() \
		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)

#ifdef CONFIG_PREEMPT
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define preemptible()	0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
extern void synchronize_irq(unsigned int irq);
#else
# define synchronize_irq(irq)	barrier()
#endif

struct task_struct;

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
static inline void account_system_vtime(struct task_struct *tsk)
{
}
#endif

#if defined(CONFIG_NO_HZ)
#if defined(CONFIG_TINY_RCU)
extern void rcu_enter_nohz(void);
extern void rcu_exit_nohz(void);

static inline void rcu_irq_enter(void)
{
	rcu_exit_nohz();
}

static inline void rcu_irq_exit(void)
{
	rcu_enter_nohz();
}

static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
#else
# define rcu_irq_enter() do { } while (0)
# define rcu_irq_exit() do { } while (0)
# define rcu_nmi_enter() do { } while (0)
# define rcu_nmi_exit() do { } while (0)
#endif /* #if defined(CONFIG_NO_HZ) */

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_system_vtime(current);		\
		add_preempt_count(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_system_vtime(current);		\
		sub_preempt_count(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

#define nmi_enter()						\
	do {							\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
		lockdep_off();					\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)

#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		lockdep_on();					\
		BUG_ON(!in_nmi());				\
		sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
	} while (0)

#endif /* LINUX_HARDIRQ_H */
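
The bit-field layout documented at the top of the header can be exercised outside the kernel. The following standalone C program is a minimal sketch: it replicates the header's shift and mask definitions (with HARDIRQ_BITS at its 10-bit maximum) rather than including kernel headers, and the sample count value is hypothetical, chosen to represent a hardirq nesting depth of 2, a softirq count of 1, and a preemption depth of 3.

#include <stdio.h>

/* Replicas of the header's layout, with HARDIRQ_BITS = MAX_HARDIRQ_BITS. */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	10
#define NMI_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

int main(void)
{
	/* Hypothetical preempt_count: hardirq depth 2, softirq count 1,
	 * preemption depth 3 -> 0x00020103. */
	unsigned long count = (2UL << HARDIRQ_SHIFT) |
			      (1UL << SOFTIRQ_SHIFT) | 3UL;

	printf("preempt_count = 0x%08lx\n", count);
	printf("  preemption depth: %lu\n", (count & PREEMPT_MASK) >> PREEMPT_SHIFT);
	printf("  softirq count:    %lu\n", (count & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT);
	printf("  hardirq nesting:  %lu\n", (count & HARDIRQ_MASK) >> HARDIRQ_SHIFT);
	printf("  in NMI:           %s\n",  (count & NMI_MASK) ? "yes" : "no");
	return 0;
}

Decoding works the same way the header's predicates do: each field is isolated by masking and shifting, which is why in_interrupt() reduces to a single non-zero test over the combined hardirq, softirq, and NMI bits.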