/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *	    PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *	    Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/acpi.h>
#include <linux/sched.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

#define IRQ_UNUSED		(0)
#define IRQ_USED		(1)
#define IRQ_RSVD		(2)

/* These can be overridden in platform_irq_init */
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);

DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = -1
};

static cpumask_t vector_table[IA64_NUM_VECTORS] = {
	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = IRQ_UNUSED
};
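
/*
 * Bookkeeping invariants for the structures above:
 *  - irq_cfg[irq] records the vector and CPU domain currently bound
 *    to an IRQ (vector == IRQ_VECTOR_UNASSIGNED when unbound).
 *  - The per-CPU vector_irq[vector] array is the reverse map: which
 *    IRQ owns a given vector on this CPU (-1 when free).
 *  - vector_table[vector] is the union of all domains the vector is
 *    bound in, so allocation can detect per-CPU conflicts.
 *  - irq_status[irq] tracks whether the IRQ slot itself is unused,
 *    in use, or reserved.
 * All four are kept consistent under vector_lock.
 */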

static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}

static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpus_empty(mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpus_and(mask, domain, vector_table[vector]);
		if (!cpus_empty(mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}

static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpus_empty(mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpus_or(vector_table[vector], vector_table[vector], domain);
	return 0;
}

int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t mask;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	cpumask_and(&mask, &cfg->domain, cpu_online_mask);
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpus_andnot(vector_table[vector], vector_table[vector], domain);
}

static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

int
ia64_native_assign_irq_vector (int irq)
{
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	if (irq == AUTO_ASSIGN)
		irq = vector;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}

void
ia64_native_free_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;
	clear_irq_vector(vector);
}

int
reserve_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}
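
/*
 * Illustrative sketch only (hypothetical caller, not code from this
 * file): how a platform driver would obtain and release a device
 * vector with the API above.
 *
 *	int vector = ia64_native_assign_irq_vector(AUTO_ASSIGN);
 *	if (vector < 0)
 *		return vector;	(-ENOSPC: no free vector in any domain)
 *	...
 *	ia64_native_free_irq_vector(vector);
 *
 * With AUTO_ASSIGN the IRQ number is taken to be the vector itself,
 * i.e. the identity mapping used for directly-vectored interrupts.
 */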

/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = -1;
	/* Mark the inuse vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpu_isset(cpu, irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}

#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))

static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return cpumask_of_cpu(cpu);
	return CPU_MASK_ALL;
}

static int __irq_prepare_move(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -EBUSY;
	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpu_isset(cpu, cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	cfg->move_in_progress = 1;
	cfg->old_domain = cfg->domain;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}

int irq_prepare_move(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __irq_prepare_move(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
		return;

	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
	for_each_cpu_mask(i, cleanup_mask)
		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}

static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
	int me = smp_processor_id();
	ia64_vector vector;
	unsigned long flags;

	for (vector = IA64_FIRST_DEVICE_VECTOR;
	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
		int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __this_cpu_read(vector_irq[vector]);
		if (irq < 0)
			continue;

		desc = irq_to_desc(irq);
		cfg = irq_cfg + irq;
		raw_spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpu_isset(me, cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__this_cpu_write(vector_irq[vector], -1);
		cpu_clear(me, vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
 unlock:
		raw_spin_unlock(&desc->lock);
	}
	return IRQ_HANDLED;
}

static struct irqaction irq_move_irqaction = {
	.handler =	smp_irq_move_cleanup_interrupt,
	.name =		"irq_move"
};
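
/*
 * Migration protocol (summary of the code above):
 *  1. irq_prepare_move() binds the IRQ to a new vector/domain and
 *     remembers the previous domain in cfg->old_domain.
 *  2. When an interrupt is then handled outside the old domain,
 *     irq_complete_move() sends IA64_IRQ_MOVE_VECTOR IPIs to the
 *     online CPUs of the old domain.
 *  3. Each of those CPUs runs smp_irq_move_cleanup_interrupt(),
 *     releasing its per-CPU reverse mapping and decrementing
 *     move_cleanup_count; once the count reaches zero the old
 *     vector is fully retired.
 */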

static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 0;
}
early_param("vector", parse_vector_domain);
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif


void destroy_and_reserve_irq(unsigned int irq)
{
	unsigned long flags;

	irq_init_desc(irq);
	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * Dynamic irq allocation and deallocation for MSI
 */
int create_irq(void)
{
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		irq_init_desc(irq);
	return irq;
}

void destroy_irq(unsigned int irq)
{
	irq_init_desc(irq);
	clear_irq_vector(irq);
}
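
/*
 * Illustrative sketch only (hypothetical MSI setup code, not part of
 * this file): how the dynamic allocation above is typically used.
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return irq;	(no free IRQ slot or vector)
 *	... program the MSI address/data for irq_to_vector(irq) ...
 *	destroy_irq(irq);	(on teardown)
 */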

#ifdef CONFIG_SMP
#	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#	define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
#	define IS_RESCHEDULE(vec)	(0)
#	define IS_LOCAL_TLB_FLUSH(vec)	(0)
#endif
/*
 * This is where the IVT branches when we get an external
 * interrupt.  From here we dispatch to the correct hardware IRQ
 * handler via function pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here.  This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=0x%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irq_this_cpu(irq);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard
	 * softirq handler needs to be able to wait for further keyboard
	 * interrupts, which can't come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
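
/*
 * Background note on the TPR trick above (architectural detail, not
 * stated elsewhere in this file): the 256 external vectors fall into
 * 16 priority classes of 16 vectors each (class = vector / 16).
 * Writing the in-service vector to cr.tpr masks every vector in the
 * same or a lower class, so each nested interrupt must come from a
 * strictly higher class and the nesting depth cannot exceed 16.
 */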

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a CPU is about to be
 * brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irq_this_cpu(irq);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Dispatch through the normal path, as if this
			 * interrupt had come in through a real handler
			 * (pt_regs was set to NULL above).  This could
			 * probably share code with ia64_handle_irq().
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif


#ifdef CONFIG_SMP

static irqreturn_t dummy_handler (int irq, void *dev_id)
{
	BUG();
}

static struct irqaction ipi_irqaction = {
	.handler =	handle_IPI,
	.name =		"IPI"
};

/*
 * KVM uses this interrupt to force a cpu out of guest mode
 */
static struct irqaction resched_irqaction = {
	.handler =	dummy_handler,
	.name =		"resched"
};

static struct irqaction tlb_irqaction = {
	.handler =	dummy_handler,
	.name =		"tlb_flush"
};

#endif

void
ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
{
	unsigned int irq;

	irq = vec;
	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
	irq_set_status_flags(irq, IRQ_PER_CPU);
	irq_set_chip(irq, &irq_type_ia64_lsapic);
	if (action)
		setup_irq(irq, action);
	irq_set_handler(irq, handle_percpu_irq);
}

void __init
ia64_native_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
#endif
}

void __init
init_IRQ (void)
{
#ifdef CONFIG_ACPI
	acpi_boot_init();
#endif
	ia64_register_ipi();
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
	if (vector_domain_type != VECTOR_DOMAIN_NONE)
		register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
#endif
#endif
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
	platform_irq_init();
}

void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

	phys_cpu_id = cpu_physical_id(cpu);

	/*
	 * The CPU number is encoded as an 8-bit ID and an 8-bit EID.
	 */

	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

	writeq(ipi_data, ipi_addr);
}
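
/*
 * Worked example for the encoding above (illustrative values only):
 * sending vector 0xd0 with delivery_mode 0 and redirect 0 to a CPU
 * whose physical id is 0x0304 (ID=0x03, EID=0x04):
 *
 *	ipi_data = (0 << 8) | 0xd0		-> 0x00d0
 *	ipi_addr = ipi_base_addr + ((0x0304 << 4) | (0 << 3))
 *		 = ipi_base_addr + 0x3040
 *
 * The writeq() to that address in the uncached processor interrupt
 * block delivers the interrupt to the target CPU.
 */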