irq_64.c revision 25ad403f67d7673f38a473ec138d240804785ae3
/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>	/* kmemleak_not_leak(), used below */

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"

#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_virt_irq(unsigned long bucket_pa,
				unsigned int virt_irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (virt_irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));
}

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);

unsigned char virt_irq_alloc(unsigned int dev_handle,
			     unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!virt_irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		virt_irq_table[ent].dev_handle = dev_handle;
		virt_irq_table[ent].dev_ino = dev_ino;
		virt_irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

	return ent;
}
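
/* Illustrative behavior of the allocator above: the very first
 * successful virt_irq_alloc() call hands out entry 1, because the
 * scan starts there and entry 0 is left unused.  That makes 0 a safe
 * "no IRQ" sentinel: build_irq() and sun4v_build_common() below treat
 * a zero __virt_irq in a bucket as "not yet bound".
 */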

#ifdef CONFIG_PCI_MSI
void virt_irq_free(unsigned int virt_irq)
{
	unsigned long flags;

	if (virt_irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	virt_irq_table[virt_irq].in_use = 0;

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %9s", irq_desc[i].chip->name);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
		seq_printf(p, "     Non-maskable interrupts\n");
	}
	return 0;
}

static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpus_equal(mask, cpu_online_map)) {
		cpuid = map_to_cpu(virt_irq);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);
		cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(virt_irq, affinity)	\
	real_hard_smp_processor_id()
#endif
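
/* Illustrative behavior of irq_choose_cpu(): with CPUs 0-3 online and
 * an affinity mask of {2,3}, the intersection with cpu_online_map is
 * non-empty, so the first CPU in it (2) is chosen.  With the default
 * "all online CPUs" affinity, map_to_cpu() (see cpumap.c) picks the
 * target so that interrupts spread across the machine topology.
 */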

static void sun4u_irq_enable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(virt_irq,
				       irq_desc[virt_irq].affinity);
		imap = data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, data->iclr);
	}
}

static int sun4u_set_affinity(unsigned int virt_irq,
			      const struct cpumask *mask)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(virt_irq, mask);
		imap = data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, data->iclr);
	}

	return 0;
}
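
/* Worked example for sun4u_compute_tid() (illustrative): on a
 * Safari-based (Cheetah) system, cpuid 37 decomposes into agent ID
 * 37 & 0x1f == 5 and node ID (37 >> 5) & 0x1f == 1, giving
 * tid == (5 << IMAP_AID_SHIFT) | (1 << IMAP_NID_SHIFT).  On JBUS
 * (Jalapeno/Serrano) parts the cpuid is instead shifted into place
 * whole and masked with IMAP_TID_JBUS.
 */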

/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(unsigned int virt_irq)
{
}

static void sun4u_irq_eoi(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(data))
		upa_writeq(ICLR_IDLE, data->iclr);
}

static void sun4v_irq_enable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq,
					     irq_desc[virt_irq].affinity);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static int sun4v_set_affinity(unsigned int virt_irq,
			      const struct cpumask *mask)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq, mask);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}

static void sun4v_irq_disable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	struct irq_desc *desc = irq_desc + virt_irq;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}
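
/* The sun4v_irq_*() methods above manage interrupts named by a flat
 * system interrupt number (sysino) via the sun4v_intr_*() hypervisor
 * calls.  The sun4v_virq_*() methods below manage cookie-based
 * virtual interrupts (e.g. LDC channels), which the hypervisor
 * addresses by (device handle, device ino) pairs via the
 * sun4v_vintr_*() calls.
 */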

static void sun4v_virq_enable(unsigned int virt_irq)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(unsigned int virt_irq,
				   const struct cpumask *mask)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq, mask);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}

static void sun4v_virq_disable(unsigned int virt_irq)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(unsigned int virt_irq)
{
	struct irq_desc *desc = irq_desc + virt_irq;
	unsigned long dev_handle, dev_ino;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
	.name		= "sun4u",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.eoi		= sun4u_irq_eoi,
	.set_affinity	= sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
	.name		= "sun4v",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.eoi		= sun4v_irq_eoi,
	.set_affinity	= sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
	.name		= "vsun4v",
	.enable		= sun4v_virq_enable,
	.disable	= sun4v_virq_disable,
	.eoi		= sun4v_virq_eoi,
	.set_affinity	= sun4v_virt_set_affinity,
};

static void pre_flow_handler(unsigned int virt_irq,
			     struct irq_desc *desc)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	data->pre_handler(ino, data->arg1, data->arg2);

	handle_fasteoi_irq(virt_irq, desc);
}

void irq_install_pre_handler(int virt_irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	data->pre_handler = func;
	data->arg1 = arg1;
	data->arg2 = arg2;

	desc->handle_irq = pre_flow_handler;
}

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, ino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq,
					      &sun4u_irq,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	data->imap = imap;
	data->iclr = iclr;

out:
	return virt_irq;
}
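
/* Illustrative use of build_irq() by a sun4u bus driver; my_handler,
 * mydev and the register addresses are hypothetical:
 *
 *	virt_irq = build_irq(0, iclr_pa, imap_pa);
 *	err = request_irq(virt_irq, my_handler, 0, "mydev", dev);
 *
 * Calling build_irq() again with the same INO returns the existing
 * virtual IRQ, since the bucket already carries a __virt_irq binding.
 */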

static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, sysino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq, chip,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

out:
	return virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}
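
/* sun4v_build_virq() below allocates its bucket dynamically and
 * registers the bucket's complemented physical address with the
 * hypervisor as the delivery cookie.  Complementing the PA makes the
 * cookie negative when viewed as a signed 64-bit value, which lets
 * the device mondo vector code tell cookie values apart from small
 * sysino values (illustrative summary; the authoritative check lives
 * in the sun4v vector trap code).
 */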

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
	unsigned int virt_irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;

	/* The only reference we store to the IRQ bucket is by
	 * physical address, which kmemleak can't see, so tell it
	 * that this object explicitly is not a leak and should be
	 * scanned.
	 */
	kmemleak_not_leak(bucket);

	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	virt_irq = virt_irq_alloc(devhandle, devino);
	bucket_set_virt_irq(__pa(bucket), virt_irq);

	set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
				      handle_fasteoi_irq,
				      "IVEC");

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data))
		return 0;

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	desc = irq_desc + virt_irq;
	desc->status |= IRQ_NOAUTOEN;

	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return virt_irq;
}

void ack_bad_irq(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	if (!ino)
		ino = 0xdeadbeef;

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
	       ino, virt_irq);
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

static __attribute__((always_inline)) void *set_hardirq_stack(void)
{
	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];

	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
	if (orig_sp < sp ||
	    orig_sp > (sp + THREAD_SIZE)) {
		sp += THREAD_SIZE - 192 - STACK_BIAS;
		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
	}

	return orig_sp;
}

static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
{
	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
}

void __irq_entry handler_irq(int irq, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << irq);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		struct irq_desc *desc;
		unsigned long next_pa;
		unsigned int virt_irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		virt_irq = bucket_get_virt_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		desc = irq_desc + virt_irq;

		if (!(desc->status & IRQ_DISABLED))
			desc->handle_irq(virt_irq, desc);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}
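
/* A note on the snapshot taken in handler_irq() above: the per-cpu
 * trap_block[].irq_worklist_pa slot holds the physical address of the
 * first pending ino_bucket, with further buckets chained through
 * __irq_chain_pa.  The vector trap code appends buckets to this list,
 * which is why the head is loaded and cleared with PSTATE_IE off: a
 * new vector interrupt cannot slip in between the ldx and the stx.
 */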

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			if (irq_desc[irq].chip->set_affinity)
				irq_desc[irq].chip->set_affinity(irq,
					irq_desc[irq].affinity);
		}
		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume that if the node is not present, the PROM uses a
	 * different tick mechanism which we need not care about.
	 */
	if (!dp) {
		prom_timers = NULL;
		return;
	}

	/* If the PROM is really using this timer, it must already have
	 * mapped it.
	 */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = NULL;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as on sun4c/sun4m, the PROM uses a timer which ticks at
	 * IRQ 14.  We turn both timers off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}
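
/* Illustrative arithmetic for register_one_mondo(): a queue mask of
 * 0x1fff describes an 8KB region, so num_entries is
 * (0x1fff + 1) / 64 == 128 mondo entries of 64 bytes each.
 */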

/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = kzalloc(size, GFP_KERNEL);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries.  */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQs pending in the soft interrupt
	 * registers; a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that the ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}