#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;

static int disable_intremap;
static int disable_sourceid_checking;
static int no_x2apic_optout;

static __init int setup_nointremap(char *str)
{
        disable_intremap = 1;
        return 0;
}
early_param("nointremap", setup_nointremap);

static __init int setup_intremap(char *str)
{
        if (!str)
                return -EINVAL;

        while (*str) {
                if (!strncmp(str, "on", 2))
                        disable_intremap = 0;
                else if (!strncmp(str, "off", 3))
                        disable_intremap = 1;
                else if (!strncmp(str, "nosid", 5))
                        disable_sourceid_checking = 1;
                else if (!strncmp(str, "no_x2apic_optout", 16))
                        no_x2apic_optout = 1;

                str += strcspn(str, ",");
                while (*str == ',')
                        str++;
        }

        return 0;
}
early_param("intremap", setup_intremap);

static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        struct irq_cfg *cfg = irq_get_chip_data(irq);
        return cfg ? &cfg->irq_2_iommu : NULL;
}

int get_irte(int irq, struct irte *entry)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int index;

        if (!entry || !irq_iommu)
                return -1;

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return 0;
}

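/*
 * Allocate 'count' consecutive IRTEs for the given irq. For multi-vector
 * MSI, count is rounded up to a power of two and the resulting mask is
 * recorded so the whole block can later be flushed and freed together.
 */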
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        u16 index, start_index;
        unsigned int mask = 0;
        unsigned long flags;
        int i;

        if (!count || !irq_iommu)
                return -1;

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return index;
}

static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        return qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int index;

        if (!irq_iommu)
                return -1;

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return index;
}

int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;

        if (!irq_iommu)
                return -1;

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

int modify_irte(int irq, struct irte *irte_modified)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        struct intel_iommu *iommu;
        unsigned long flags;
        struct irte *irte;
        int rc, index;

        if (!irq_iommu)
                return -1;

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit(&irte->low, irte_modified->low);
        set_64bit(&irte->high, irte_modified->high);
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        rc = qi_flush_iec(iommu, index, 0);
        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
        int i;

        for (i = 0; i < MAX_HPET_TBS; i++)
                if (ir_hpet[i].id == hpet_id)
                        return ir_hpet[i].iommu;
        return NULL;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

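/*
 * Find the interrupt-remapping hardware unit (DRHD) whose device scope
 * covers the given PCI device.
 */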
struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

static int clear_entries(struct irq_2_iommu *irq_iommu)
{
        struct irte *start, *entry, *end;
        struct intel_iommu *iommu;
        int index;

        if (irq_iommu->sub_handle)
                return 0;

        iommu = irq_iommu->iommu;
        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        start = iommu->ir_table->base + index;
        end = start + (1 << irq_iommu->irte_mask);

        for (entry = start; entry < end; entry++) {
                set_64bit(&entry->low, 0);
                set_64bit(&entry->high, 0);
        }

        return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

int free_irte(int irq)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int rc;

        if (!irq_iommu)
                return -1;

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

        rc = clear_entries(irq_iommu);

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY           0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ       0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS          0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16       0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1  0x1  /* verify most significant 13 bits, ignore
                              * the third least significant bit
                              */
#define SQ_13_IGNORE_2  0x2  /* verify most significant 13 bits, ignore
                              * the second and third least significant bits
                              */
#define SQ_13_IGNORE_3  0x3  /* verify most significant 13 bits, ignore
                              * the three least significant bits
                              */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
                         unsigned int sq, unsigned int sid)
{
        if (disable_sourceid_checking)
                svt = SVT_NO_VERIFY;
        irte->svt = svt;
        irte->sq = sq;
        irte->sid = sid;
}

int set_ioapic_sid(struct irte *irte, int apic)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_IO_APICS; i++) {
                if (ir_ioapic[i].id == apic) {
                        sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
                return -1;
        }

        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

        return 0;
}

int set_hpet_sid(struct irte *irte, u8 id)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_HPET_TBS; i++) {
                if (ir_hpet[i].id == id) {
                        sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of HPET block (%d)\n", id);
                return -1;
        }

        /*
         * Should really use SQ_ALL_16. Some platforms are broken.
         * While we figure out the right quirks for these broken platforms, use
         * SQ_13_IGNORE_3 for now.
         */
        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

        return 0;
}

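/*
 * Derive the source-id used to validate MSIs from a device. PCIe and
 * Root Complex integrated PCI devices are verified against their own
 * requester-id; behind a PCIe-to-PCI bridge only the originating bus
 * range can be trusted, and behind a legacy PCI bridge requests carry
 * the bridge's requester-id.
 */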
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
        struct pci_dev *bridge;

        if (!irte || !dev)
                return -1;

        /* PCIe device or Root Complex integrated PCI device */
        if (pci_is_pcie(dev) || !dev->bus->parent) {
                set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                             (dev->bus->number << 8) | dev->devfn);
                return 0;
        }

        bridge = pci_find_upstream_pcie_bridge(dev);
        if (bridge) {
                if (pci_is_pcie(bridge)) /* this is a PCIe-to-PCI/PCIX bridge */
                        set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
                                     (bridge->bus->number << 8) | dev->bus->number);
                else /* this is a legacy PCI bridge */
                        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                                     (bridge->bus->number << 8) | bridge->devfn);
        }

        return 0;
}

static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        iommu->gcmd |= DMA_GCMD_SIRTP;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

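/*
 * Allocate the interrupt-remapping table for this iommu and point the
 * hardware at it via iommu_set_intr_remapping().
 */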
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_ATOMIC);

        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
                                 INTR_REMAP_PAGE_ORDER);

        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                iommu->ir_table = NULL;
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);
        return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;

        if (!ecap_ir_support(iommu->ecap))
                return;

        /*
         * global invalidation of interrupt entry cache before disabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_IRES))
                goto end;

        iommu->gcmd &= ~DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, !(sts & DMA_GSTS_IRES), sts);

end:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int __init dmar_x2apic_optout(void)
{
        struct acpi_table_dmar *dmar;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar || no_x2apic_optout)
                return 0;
        return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

int __init intr_remapping_supported(void)
{
        struct dmar_drhd_unit *drhd;

        if (disable_intremap)
                return 0;

        if (!dmar_ir_support())
                return 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        return 0;
        }

        return 1;
}

int __init enable_intr_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;
        int eim = 0;

        if (parse_ioapics_under_ir() != 1) {
                printk(KERN_INFO "Not enabling interrupt remapping\n");
                return -1;
        }

        if (x2apic_supported()) {
                eim = !dmar_x2apic_optout();
                WARN(!eim, KERN_WARNING
                     "Your BIOS is broken and requested that x2apic be disabled\n"
                     "This will leave your machine vulnerable to irq-injection attacks\n"
                     "Use 'intremap=no_x2apic_optout' to override BIOS request\n");
        }

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                /*
                 * If the queued invalidation is already initialized,
                 * we shouldn't disable it.
                 */
                if (iommu->qi)
                        continue;

                /*
                 * Clear previous faults.
                 */
                dmar_fault(-1, iommu);

                /*
                 * Disable intr remapping and queued invalidation, if already
                 * enabled prior to OS handover.
                 */
                iommu_disable_intr_remapping(iommu);

                dmar_disable_qi(iommu);
        }

        /*
         * check for the Interrupt-remapping support
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
                               "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHD's.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;

                ret = dmar_enable_qi(iommu);
                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued "
                               "invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;
        pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");

        return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}

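/*
 * The DMAR device-scope helpers below run at early boot, before the PCI
 * subsystem is up, so the path from the host bridge down to the IOAPIC
 * or HPET is walked with direct config-space reads.
 */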
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
                                    struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }
        /* after the walk, 'path' points at the final (device) entry */
        ir_hpet[ir_hpet_num].bus   = bus;
        ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_hpet[ir_hpet_num].iommu = iommu;
        ir_hpet[ir_hpet_num].id    = scope->enumeration_id;
        ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }

        ir_ioapic[ir_ioapic_num].bus   = bus;
        ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_ioapic[ir_ioapic_num].iommu = iommu;
        ir_ioapic[ir_ioapic_num].id    = scope->enumeration_id;
        ir_ioapic_num++;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base "
                               "0x%Lx IOMMU %d\n", scope->enumeration_id,
                               drhd->address, iommu->seq_id);

                        ir_parse_one_ioapic_scope(scope, iommu);
                } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
                        if (ir_hpet_num == MAX_HPET_TBS) {
                                printk(KERN_WARNING "Exceeded Max HPET blocks\n");
                                return -1;
                        }

                        printk(KERN_INFO "HPET id %d under DRHD base "
                               "0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_parse_one_hpet_scope(scope, iommu);
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Finds the association between IOAPICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APICs listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}

int __init ir_dev_scope_init(void)
{
        if (!intr_remapping_enabled)
                return 0;

        return dmar_dev_scope_init();
}
rootfs_initcall(ir_dev_scope_init);

void disable_intr_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;

        /*
         * Disable Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                iommu_disable_intr_remapping(iommu);
        }
}

int reenable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;
        struct intel_iommu *iommu = NULL;

        for_each_iommu(iommu, drhd)
                if (iommu->qi)
                        dmar_reenable_qi(iommu);

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                /* Set up interrupt remapping for iommu. */
                iommu_set_intr_remapping(iommu, eim);
                setup = 1;
        }

        if (!setup)
                goto error;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}