/****************************************************************************
 ****************************************************************************
 ***
 ***   This header was automatically generated from a Linux kernel header
 ***   of the same name, to make information necessary for userspace to
 ***   call into the kernel available to libc.  It contains only constants,
 ***   structures, and macros generated from the original header, and thus,
 ***   contains no copyrightable information.
 ***
 ****************************************************************************
 ****************************************************************************/
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#include <linux/smp_lock.h>
#include <linux/lockdep.h>
#include <asm/hardirq.h>
#include <asm/system.h>

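/*
 * Layout of preempt_count() implied by the definitions below (derived here
 * for reference, not part of the original header; assumes the default
 * 8/8/12 split):
 *
 *   bits  0.. 7  PREEMPT_MASK  0x000000ff  preemption-disable nesting
 *   bits  8..15  SOFTIRQ_MASK  0x0000ff00  softirq nesting
 *   bits 16..27  HARDIRQ_MASK  0x0fff0000  hardirq nesting
 */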
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8

#ifndef HARDIRQ_BITS
#define HARDIRQ_BITS 12

/* Sanity check: the hardirq field must be wide enough to count NR_IRQS nested interrupts. */
#if (1 << HARDIRQ_BITS) < NR_IRQS
#error HARDIRQ_BITS is too low!
#endif
#endif

#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)

#define __IRQ_MASK(x) ((1UL << (x))-1)

#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
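/*
 * Illustrative values, derived from the default shifts above (not part of
 * the original header): PREEMPT_OFFSET == 0x1, SOFTIRQ_OFFSET == 0x100,
 * HARDIRQ_OFFSET == 0x10000.  irq_enter()/__irq_exit() below add and remove
 * one HARDIRQ_OFFSET from preempt_count().
 */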

/* PREEMPT_ACTIVE must sit above all three count fields so it never aliases them. */
#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
#error PREEMPT_ACTIVE is too low!
#endif

/* Per-context nesting counts extracted from preempt_count(). */
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))

/*
 * in_irq()       - currently servicing a hardware interrupt
 * in_softirq()   - currently in softirq context
 * in_interrupt() - in either kind of interrupt context
 */
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())

/* Non-zero while preempt_count() holds anything besides the PREEMPT_ACTIVE flag. */
#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
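
/*
 * Illustrative use only (not part of the original header): code that can be
 * reached from both process and interrupt context often selects its
 * allocation behaviour with these predicates, e.g.
 *
 *   gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
 *   buf = kmalloc(len, flags);
 */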

/* Stubs for a non-preemptible kernel configuration. */
#define preemptible() 0
#define IRQ_EXIT_OFFSET HARDIRQ_OFFSET

/* Uniprocessor stub: nothing to wait on, just a compiler barrier. */
#define synchronize_irq(irq) barrier()

struct task_struct;

#define irq_enter()                                     \
        do {                                            \
                account_system_vtime(current);          \
                add_preempt_count(HARDIRQ_OFFSET);      \
                trace_hardirq_enter();                  \
        } while (0)

#define __irq_exit()                                    \
        do {                                            \
                trace_hardirq_exit();                   \
                account_system_vtime(current);          \
                sub_preempt_count(HARDIRQ_OFFSET);      \
        } while (0)
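
/*
 * Illustrative sketch (not part of the original header) of how these macros
 * bracket interrupt handling in low-level entry code:
 *
 *   irq_enter();    // preempt_count() gains HARDIRQ_OFFSET; in_irq() is true
 *   ... run the handler for the pending interrupt ...
 *   __irq_exit();   // accounting undone, HARDIRQ_OFFSET removed
 */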

#define nmi_enter() do { lockdep_off(); irq_enter(); } while (0)
#define nmi_exit() do { __irq_exit(); lockdep_on(); } while (0)
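
/*
 * Note (not from the original header): NMI paths reuse the hardirq
 * accounting but keep lockdep switched off for the duration, since the lock
 * validator is not meant to run from NMI context.
 */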

#endif