/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h and select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED. This flag is a NOOP and scheduled to be removed.
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per-CPU
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered, for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler has
 *                finished. Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume the IRQ early, during syscore, instead of at
 *                     device resume time.
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
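/*
 * Example (illustrative sketch, not part of this header): a hypothetical
 * driver requesting a shared, falling-edge interrupt. The my_dev,
 * my_dev_irq_pending() and my_dev_ack_irq() names are assumptions made
 * up for illustration only.
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		if (!my_dev_irq_pending(dev))
 *			return IRQ_NONE;	// shared line: not our device
 *		my_dev_ack_irq(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(dev->irq, my_handler,
 *			  IRQF_SHARED | IRQF_TRIGGER_FALLING,
 *			  "my_dev", dev);
 */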
/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @flags:	flags (see IRQF_* above)
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify a per-cpu device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @dir:	pointer to the proc/irq/NN/name entry
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 */
struct irqaction {
	irq_handler_t		handler;
	unsigned long		flags;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	int			irq;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
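/*
 * Example (sketch): a hypothetical driver splitting its interrupt into a
 * quick hardirq check and a threaded handler. IRQF_ONESHOT keeps the line
 * masked until the thread finishes; the my_* names are assumptions.
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev_id)
 *	{
 *		if (!my_dev_irq_pending(dev_id))
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;		// defer the real work
 *	}
 *
 *	static irqreturn_t my_thread_fn(int irq, void *dev_id)
 *	{
 *		my_dev_process_events(dev_id);	// may sleep here
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, my_quick_check, my_thread_fn,
 *				   IRQF_ONESHOT, "my_dev", dev);
 */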
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);

extern void exit_irq_thread(void);
#else

extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

/*
 * Special function to avoid ifdeffery in kernel/irq/devres.c which
 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
 * m68k). I really love these $@%#!* obvious Makefile references:
 * ../../../kernel/irq/devres.o
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev)
{
	return request_irq(irq, handler, flags, name, dev);
}

static inline int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id)
{
	return request_irq(irq, handler, flags, name, dev_id);
}

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return request_irq(irq, handler, 0, devname, percpu_dev_id);
}

static inline void exit_irq_thread(void) { }
#endif

extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
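/*
 * Example (sketch): using the device-managed variant from a hypothetical
 * probe() routine. The IRQ is released automatically when the driver
 * detaches, so the error paths need no explicit free_irq(). struct
 * my_platform_dev and my_handler are assumptions.
 *
 *	static int my_probe(struct my_platform_dev *pdev)
 *	{
 *		int err;
 *
 *		err = devm_request_irq(&pdev->dev, pdev->irq, my_handler,
 *				       0, "my_dev", pdev);
 *		if (err)
 *			return err;	// nothing to unwind
 *		return 0;
 *	}
 */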
/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);

/* The following three functions are for core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#else
static inline void suspend_device_irqs(void) { }
static inline void resume_device_irqs(void) { }
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change. This will be
 *		called in process context.
 * @release:	Function to be called on release. This will be
 *		called in process context. Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

static inline void irq_run_affinity_notifiers(void)
{
	flush_scheduled_work();
}

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}
#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
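/*
 * Example (sketch): pinning an interrupt to CPU 2 and registering a
 * hypothetical affinity notifier so per-CPU driver state can follow the
 * IRQ. The my_* names are assumptions; @irq, @kref and @work are filled
 * in by the core.
 *
 *	static void my_notify(struct irq_affinity_notify *notify,
 *			      const cpumask_t *mask)
 *	{
 *		my_dev_rebind_queues(mask);	// hypothetical helper
 *	}
 *
 *	static void my_release(struct kref *ref)
 *	{
 *		// the notifier structure may be freed from here on
 *	}
 *
 *	static struct irq_affinity_notify my_notifier = {
 *		.notify  = my_notify,
 *		.release = my_release,
 *	};
 *
 *	irq_set_affinity(irq, cpumask_of(2));
 *	irq_set_affinity_notifier(irq, &my_notifier);
 */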
#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know that
 * a particular irq context is disabled and is the only
 * irq-context user of a lock, so that it's safe to take the
 * lock in the irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator, need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
# define disable_irq_lockdep(irq)		disable_irq(irq)
# define enable_irq_lockdep(irq)		enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
# endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */
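/*
 * Example (sketch): marking a device interrupt as a wakeup source from
 * hypothetical suspend/resume callbacks. device_may_wakeup() is assumed
 * to be available via linux/pm_wakeup.h; struct my_dev is an assumption.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct my_dev *md = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(md->irq);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		struct my_dev *md = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(md->irq);
 *		return 0;
 *	}
 */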
#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x)	(local_softirq_pending() = (x))
#define or_softirq_pending(x)	(local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif

/* PLEASE avoid allocating new softirqs unless you need _really_
 * high-frequency threaded job scheduling. For almost all purposes
 * tasklets are more than enough. E.g. all serial device BHs et
 * al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably, RCU should always be the last softirq */

	NR_SOFTIRQS
};

/* Map softirq index to softirq name. Update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];

/* The softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
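/*
 * Example (sketch): how a softirq handler is wired up. MY_SOFTIRQ is a
 * hypothetical entry in the enum above; as the note there says, new
 * softirqs should almost never be added - prefer tasklets.
 *
 *	static void my_softirq_action(struct softirq_action *a)
 *	{
 *		// runs with interrupts enabled, in softirq context
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);	// once, at init
 *
 *	raise_softirq(MY_SOFTIRQ);	// mark it pending on this CPU
 */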
/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them. The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Try to send a softirq to a remote cpu. If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);

/* Tasklets --- the multithreaded analogue of BHs.
 *
 * The main feature distinguishing them from generic softirqs: a given
 * tasklet runs on only one CPU at a time.
 *
 * The main feature distinguishing them from BHs: different tasklets
 * may run simultaneously on different CPUs.
 *
 * Properties:
 * - If tasklet_schedule() is called, the tasklet is guaranteed
 *   to execute on some cpu at least once afterwards.
 * - If the tasklet is already scheduled but its execution has not yet
 *   started, it will execute only once.
 * - If the tasklet is already running on another CPU (or if schedule is
 *   called from the tasklet itself), it is rescheduled for later.
 * - A tasklet is strictly serialized with respect to itself, but not
 *   with respect to other tasklets. If a client needs inter-tasklet
 *   synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }


enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t)	1
#define tasklet_unlock_wait(t)	do { } while (0)
#define tasklet_unlock(t)	do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}
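/*
 * Example (sketch): declaring a tasklet and scheduling it from a hardirq
 * handler so the bulk of the work runs later in softirq context. The
 * my_* names are assumptions.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *
 *		my_dev_process_events(dev);	// deferred work
 *	}
 *
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn,
 *			       (unsigned long)&my_dev_instance);
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		my_dev_ack_irq(dev_id);		// minimal hardirq work
 *		tasklet_schedule(&my_tasklet);	// defer the rest
 *		return IRQ_HANDLED;
 *	}
 */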
extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. local_irq_enable();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
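/*
 * Example (sketch): the probing sequence above as code, for a
 * hypothetical ISA-style device. mdelay() is assumed to be available
 * via linux/delay.h; the my_dev_* helpers are assumptions.
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	my_dev_mask_irq(dev);		// 1. quiesce the device
 *	local_irq_enable();		// 2.
 *	irqs = probe_irq_on();		// 3. take over idle IRQs
 *	my_dev_trigger_irq(dev);	// 4. make the device interrupt
 *	mdelay(20);			// 5. give it time to fire
 *	irq = probe_irq_off(irqs);	// 6. 0 = none, <0 = multiple
 *	my_dev_ack_irq(dev);		// 7. clear the pending interrupt
 *	if (irq <= 0)
 *		return -ENODEV;		// probe failed or was ambiguous
 */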
#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

#endif