/* msi.c revision e63340ae6b6205fef26b40a75673d1c9c0c8bb90 */
1/* 2 * File: msi.c 3 * Purpose: PCI Message Signaled Interrupt (MSI) 4 * 5 * Copyright (C) 2003-2004 Intel 6 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) 7 */ 8 9#include <linux/err.h> 10#include <linux/mm.h> 11#include <linux/irq.h> 12#include <linux/interrupt.h> 13#include <linux/init.h> 14#include <linux/ioport.h> 15#include <linux/pci.h> 16#include <linux/proc_fs.h> 17#include <linux/msi.h> 18 19#include <asm/errno.h> 20#include <asm/io.h> 21#include <asm/smp.h> 22 23#include "pci.h" 24#include "msi.h" 25 26static int pci_msi_enable = 1; 27 28static void msi_set_enable(struct pci_dev *dev, int enable) 29{ 30 int pos; 31 u16 control; 32 33 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 34 if (pos) { 35 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); 36 control &= ~PCI_MSI_FLAGS_ENABLE; 37 if (enable) 38 control |= PCI_MSI_FLAGS_ENABLE; 39 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); 40 } 41} 42 43static void msix_set_enable(struct pci_dev *dev, int enable) 44{ 45 int pos; 46 u16 control; 47 48 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 49 if (pos) { 50 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); 51 control &= ~PCI_MSIX_FLAGS_ENABLE; 52 if (enable) 53 control |= PCI_MSIX_FLAGS_ENABLE; 54 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); 55 } 56} 57 58static void msix_flush_writes(unsigned int irq) 59{ 60 struct msi_desc *entry; 61 62 entry = get_irq_msi(irq); 63 BUG_ON(!entry || !entry->dev); 64 switch (entry->msi_attrib.type) { 65 case PCI_CAP_ID_MSI: 66 /* nothing to do */ 67 break; 68 case PCI_CAP_ID_MSIX: 69 { 70 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + 71 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET; 72 readl(entry->mask_base + offset); 73 break; 74 } 75 default: 76 BUG(); 77 break; 78 } 79} 80 81static void msi_set_mask_bit(unsigned int irq, int flag) 82{ 83 struct msi_desc *entry; 84 85 entry = get_irq_msi(irq); 86 BUG_ON(!entry || !entry->dev); 87 switch 
(entry->msi_attrib.type) { 88 case PCI_CAP_ID_MSI: 89 if (entry->msi_attrib.maskbit) { 90 int pos; 91 u32 mask_bits; 92 93 pos = (long)entry->mask_base; 94 pci_read_config_dword(entry->dev, pos, &mask_bits); 95 mask_bits &= ~(1); 96 mask_bits |= flag; 97 pci_write_config_dword(entry->dev, pos, mask_bits); 98 } else { 99 msi_set_enable(entry->dev, !flag); 100 } 101 break; 102 case PCI_CAP_ID_MSIX: 103 { 104 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + 105 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET; 106 writel(flag, entry->mask_base + offset); 107 readl(entry->mask_base + offset); 108 break; 109 } 110 default: 111 BUG(); 112 break; 113 } 114 entry->msi_attrib.masked = !!flag; 115} 116 117void read_msi_msg(unsigned int irq, struct msi_msg *msg) 118{ 119 struct msi_desc *entry = get_irq_msi(irq); 120 switch(entry->msi_attrib.type) { 121 case PCI_CAP_ID_MSI: 122 { 123 struct pci_dev *dev = entry->dev; 124 int pos = entry->msi_attrib.pos; 125 u16 data; 126 127 pci_read_config_dword(dev, msi_lower_address_reg(pos), 128 &msg->address_lo); 129 if (entry->msi_attrib.is_64) { 130 pci_read_config_dword(dev, msi_upper_address_reg(pos), 131 &msg->address_hi); 132 pci_read_config_word(dev, msi_data_reg(pos, 1), &data); 133 } else { 134 msg->address_hi = 0; 135 pci_read_config_word(dev, msi_data_reg(pos, 1), &data); 136 } 137 msg->data = data; 138 break; 139 } 140 case PCI_CAP_ID_MSIX: 141 { 142 void __iomem *base; 143 base = entry->mask_base + 144 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; 145 146 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); 147 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); 148 msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET); 149 break; 150 } 151 default: 152 BUG(); 153 } 154} 155 156void write_msi_msg(unsigned int irq, struct msi_msg *msg) 157{ 158 struct msi_desc *entry = get_irq_msi(irq); 159 switch (entry->msi_attrib.type) { 160 case PCI_CAP_ID_MSI: 161 { 162 struct pci_dev *dev = entry->dev; 163 
int pos = entry->msi_attrib.pos; 164 165 pci_write_config_dword(dev, msi_lower_address_reg(pos), 166 msg->address_lo); 167 if (entry->msi_attrib.is_64) { 168 pci_write_config_dword(dev, msi_upper_address_reg(pos), 169 msg->address_hi); 170 pci_write_config_word(dev, msi_data_reg(pos, 1), 171 msg->data); 172 } else { 173 pci_write_config_word(dev, msi_data_reg(pos, 0), 174 msg->data); 175 } 176 break; 177 } 178 case PCI_CAP_ID_MSIX: 179 { 180 void __iomem *base; 181 base = entry->mask_base + 182 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; 183 184 writel(msg->address_lo, 185 base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); 186 writel(msg->address_hi, 187 base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); 188 writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET); 189 break; 190 } 191 default: 192 BUG(); 193 } 194 entry->msg = *msg; 195} 196 197void mask_msi_irq(unsigned int irq) 198{ 199 msi_set_mask_bit(irq, 1); 200 msix_flush_writes(irq); 201} 202 203void unmask_msi_irq(unsigned int irq) 204{ 205 msi_set_mask_bit(irq, 0); 206 msix_flush_writes(irq); 207} 208 209static int msi_free_irqs(struct pci_dev* dev); 210 211 212static struct msi_desc* alloc_msi_entry(void) 213{ 214 struct msi_desc *entry; 215 216 entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL); 217 if (!entry) 218 return NULL; 219 220 INIT_LIST_HEAD(&entry->list); 221 entry->irq = 0; 222 entry->dev = NULL; 223 224 return entry; 225} 226 227#ifdef CONFIG_PM 228static void __pci_restore_msi_state(struct pci_dev *dev) 229{ 230 int pos; 231 u16 control; 232 struct msi_desc *entry; 233 234 if (!dev->msi_enabled) 235 return; 236 237 entry = get_irq_msi(dev->irq); 238 pos = entry->msi_attrib.pos; 239 240 pci_intx(dev, 0); /* disable intx */ 241 msi_set_enable(dev, 0); 242 write_msi_msg(dev->irq, &entry->msg); 243 if (entry->msi_attrib.maskbit) 244 msi_set_mask_bit(dev->irq, entry->msi_attrib.masked); 245 246 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); 247 control &= ~(PCI_MSI_FLAGS_QSIZE | 
PCI_MSI_FLAGS_ENABLE); 248 if (entry->msi_attrib.maskbit || !entry->msi_attrib.masked) 249 control |= PCI_MSI_FLAGS_ENABLE; 250 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); 251} 252 253static void __pci_restore_msix_state(struct pci_dev *dev) 254{ 255 int pos; 256 struct msi_desc *entry; 257 u16 control; 258 259 if (!dev->msix_enabled) 260 return; 261 262 /* route the table */ 263 pci_intx(dev, 0); /* disable intx */ 264 msix_set_enable(dev, 0); 265 266 list_for_each_entry(entry, &dev->msi_list, list) { 267 write_msi_msg(entry->irq, &entry->msg); 268 msi_set_mask_bit(entry->irq, entry->msi_attrib.masked); 269 } 270 271 BUG_ON(list_empty(&dev->msi_list)); 272 entry = list_entry(dev->msi_list.next, struct msi_desc, list); 273 pos = entry->msi_attrib.pos; 274 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); 275 control &= ~PCI_MSIX_FLAGS_MASKALL; 276 control |= PCI_MSIX_FLAGS_ENABLE; 277 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); 278} 279 280void pci_restore_msi_state(struct pci_dev *dev) 281{ 282 __pci_restore_msi_state(dev); 283 __pci_restore_msix_state(dev); 284} 285#endif /* CONFIG_PM */ 286 287/** 288 * msi_capability_init - configure device's MSI capability structure 289 * @dev: pointer to the pci_dev data structure of MSI device function 290 * 291 * Setup the MSI capability structure of device function with a single 292 * MSI irq, regardless of device function is capable of handling 293 * multiple messages. A return of zero indicates the successful setup 294 * of an entry zero with the new MSI irq or non-zero for otherwise. 
295 **/ 296static int msi_capability_init(struct pci_dev *dev) 297{ 298 struct msi_desc *entry; 299 int pos, ret; 300 u16 control; 301 302 msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */ 303 304 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 305 pci_read_config_word(dev, msi_control_reg(pos), &control); 306 /* MSI Entry Initialization */ 307 entry = alloc_msi_entry(); 308 if (!entry) 309 return -ENOMEM; 310 311 entry->msi_attrib.type = PCI_CAP_ID_MSI; 312 entry->msi_attrib.is_64 = is_64bit_address(control); 313 entry->msi_attrib.entry_nr = 0; 314 entry->msi_attrib.maskbit = is_mask_bit_support(control); 315 entry->msi_attrib.masked = 1; 316 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 317 entry->msi_attrib.pos = pos; 318 if (is_mask_bit_support(control)) { 319 entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos, 320 is_64bit_address(control)); 321 } 322 entry->dev = dev; 323 if (entry->msi_attrib.maskbit) { 324 unsigned int maskbits, temp; 325 /* All MSIs are unmasked by default, Mask them all */ 326 pci_read_config_dword(dev, 327 msi_mask_bits_reg(pos, is_64bit_address(control)), 328 &maskbits); 329 temp = (1 << multi_msi_capable(control)); 330 temp = ((temp - 1) & ~temp); 331 maskbits |= temp; 332 pci_write_config_dword(dev, 333 msi_mask_bits_reg(pos, is_64bit_address(control)), 334 maskbits); 335 } 336 list_add(&entry->list, &dev->msi_list); 337 338 /* Configure MSI capability structure */ 339 ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI); 340 if (ret) { 341 msi_free_irqs(dev); 342 return ret; 343 } 344 345 /* Set MSI enabled bits */ 346 pci_intx(dev, 0); /* disable intx */ 347 msi_set_enable(dev, 1); 348 dev->msi_enabled = 1; 349 350 dev->irq = entry->irq; 351 return 0; 352} 353 354/** 355 * msix_capability_init - configure device's MSI-X capability 356 * @dev: pointer to the pci_dev data structure of MSI-X device function 357 * @entries: pointer to an array of struct msix_entry entries 358 * @nvec: number 
of @entries 359 * 360 * Setup the MSI-X capability structure of device function with a 361 * single MSI-X irq. A return of zero indicates the successful setup of 362 * requested MSI-X entries with allocated irqs or non-zero for otherwise. 363 **/ 364static int msix_capability_init(struct pci_dev *dev, 365 struct msix_entry *entries, int nvec) 366{ 367 struct msi_desc *entry; 368 int pos, i, j, nr_entries, ret; 369 unsigned long phys_addr; 370 u32 table_offset; 371 u16 control; 372 u8 bir; 373 void __iomem *base; 374 375 msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */ 376 377 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 378 /* Request & Map MSI-X table region */ 379 pci_read_config_word(dev, msi_control_reg(pos), &control); 380 nr_entries = multi_msix_capable(control); 381 382 pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); 383 bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); 384 table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; 385 phys_addr = pci_resource_start (dev, bir) + table_offset; 386 base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); 387 if (base == NULL) 388 return -ENOMEM; 389 390 /* MSI-X Table Initialization */ 391 for (i = 0; i < nvec; i++) { 392 entry = alloc_msi_entry(); 393 if (!entry) 394 break; 395 396 j = entries[i].entry; 397 entry->msi_attrib.type = PCI_CAP_ID_MSIX; 398 entry->msi_attrib.is_64 = 1; 399 entry->msi_attrib.entry_nr = j; 400 entry->msi_attrib.maskbit = 1; 401 entry->msi_attrib.masked = 1; 402 entry->msi_attrib.default_irq = dev->irq; 403 entry->msi_attrib.pos = pos; 404 entry->dev = dev; 405 entry->mask_base = base; 406 407 list_add(&entry->list, &dev->msi_list); 408 } 409 410 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); 411 if (ret) { 412 int avail = 0; 413 list_for_each_entry(entry, &dev->msi_list, list) { 414 if (entry->irq != 0) { 415 avail++; 416 } 417 } 418 419 msi_free_irqs(dev); 420 421 /* If we had some success report the number of irqs 422 * we 
succeeded in setting up. 423 */ 424 if (avail == 0) 425 avail = ret; 426 return avail; 427 } 428 429 i = 0; 430 list_for_each_entry(entry, &dev->msi_list, list) { 431 entries[i].vector = entry->irq; 432 set_irq_msi(entry->irq, entry); 433 i++; 434 } 435 /* Set MSI-X enabled bits */ 436 pci_intx(dev, 0); /* disable intx */ 437 msix_set_enable(dev, 1); 438 dev->msix_enabled = 1; 439 440 return 0; 441} 442 443/** 444 * pci_msi_check_device - check whether MSI may be enabled on a device 445 * @dev: pointer to the pci_dev data structure of MSI device function 446 * @nvec: how many MSIs have been requested ? 447 * @type: are we checking for MSI or MSI-X ? 448 * 449 * Look at global flags, the device itself, and its parent busses 450 * to determine if MSI/-X are supported for the device. If MSI/-X is 451 * supported return 0, else return an error code. 452 **/ 453static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type) 454{ 455 struct pci_bus *bus; 456 int ret; 457 458 /* MSI must be globally enabled and supported by the device */ 459 if (!pci_msi_enable || !dev || dev->no_msi) 460 return -EINVAL; 461 462 /* 463 * You can't ask to have 0 or less MSIs configured. 464 * a) it's stupid .. 465 * b) the list manipulation code assumes nvec >= 1. 466 */ 467 if (nvec < 1) 468 return -ERANGE; 469 470 /* Any bridge which does NOT route MSI transactions from it's 471 * secondary bus to it's primary bus must set NO_MSI flag on 472 * the secondary pci_bus. 473 * We expect only arch-specific PCI host bus controller driver 474 * or quirks for specific PCI bridges to be setting NO_MSI. 
475 */ 476 for (bus = dev->bus; bus; bus = bus->parent) 477 if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) 478 return -EINVAL; 479 480 ret = arch_msi_check_device(dev, nvec, type); 481 if (ret) 482 return ret; 483 484 if (!pci_find_capability(dev, type)) 485 return -EINVAL; 486 487 return 0; 488} 489 490/** 491 * pci_enable_msi - configure device's MSI capability structure 492 * @dev: pointer to the pci_dev data structure of MSI device function 493 * 494 * Setup the MSI capability structure of device function with 495 * a single MSI irq upon its software driver call to request for 496 * MSI mode enabled on its hardware device function. A return of zero 497 * indicates the successful setup of an entry zero with the new MSI 498 * irq or non-zero for otherwise. 499 **/ 500int pci_enable_msi(struct pci_dev* dev) 501{ 502 int status; 503 504 status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI); 505 if (status) 506 return status; 507 508 WARN_ON(!!dev->msi_enabled); 509 510 /* Check whether driver already requested for MSI-X irqs */ 511 if (dev->msix_enabled) { 512 printk(KERN_INFO "PCI: %s: Can't enable MSI. 
" 513 "Device already has MSI-X enabled\n", 514 pci_name(dev)); 515 return -EINVAL; 516 } 517 status = msi_capability_init(dev); 518 return status; 519} 520EXPORT_SYMBOL(pci_enable_msi); 521 522void pci_disable_msi(struct pci_dev* dev) 523{ 524 struct msi_desc *entry; 525 int default_irq; 526 527 if (!pci_msi_enable || !dev || !dev->msi_enabled) 528 return; 529 530 msi_set_enable(dev, 0); 531 pci_intx(dev, 1); /* enable intx */ 532 dev->msi_enabled = 0; 533 534 BUG_ON(list_empty(&dev->msi_list)); 535 entry = list_entry(dev->msi_list.next, struct msi_desc, list); 536 if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) { 537 return; 538 } 539 540 default_irq = entry->msi_attrib.default_irq; 541 msi_free_irqs(dev); 542 543 /* Restore dev->irq to its default pin-assertion irq */ 544 dev->irq = default_irq; 545} 546EXPORT_SYMBOL(pci_disable_msi); 547 548static int msi_free_irqs(struct pci_dev* dev) 549{ 550 struct msi_desc *entry, *tmp; 551 552 list_for_each_entry(entry, &dev->msi_list, list) 553 BUG_ON(irq_has_action(entry->irq)); 554 555 arch_teardown_msi_irqs(dev); 556 557 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { 558 if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) { 559 if (list_is_last(&entry->list, &dev->msi_list)) 560 iounmap(entry->mask_base); 561 562 writel(1, entry->mask_base + entry->msi_attrib.entry_nr 563 * PCI_MSIX_ENTRY_SIZE 564 + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); 565 } 566 list_del(&entry->list); 567 kfree(entry); 568 } 569 570 return 0; 571} 572 573/** 574 * pci_enable_msix - configure device's MSI-X capability structure 575 * @dev: pointer to the pci_dev data structure of MSI-X device function 576 * @entries: pointer to an array of MSI-X entries 577 * @nvec: number of MSI-X irqs requested for allocation by device driver 578 * 579 * Setup the MSI-X capability structure of device function with the number 580 * of requested irqs upon its software driver call to request for 581 * MSI-X mode enabled on its hardware device 
function. A return of zero 582 * indicates the successful configuration of MSI-X capability structure 583 * with new allocated MSI-X irqs. A return of < 0 indicates a failure. 584 * Or a return of > 0 indicates that driver request is exceeding the number 585 * of irqs available. Driver should use the returned value to re-send 586 * its request. 587 **/ 588int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) 589{ 590 int status, pos, nr_entries; 591 int i, j; 592 u16 control; 593 594 if (!entries) 595 return -EINVAL; 596 597 status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX); 598 if (status) 599 return status; 600 601 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 602 pci_read_config_word(dev, msi_control_reg(pos), &control); 603 nr_entries = multi_msix_capable(control); 604 if (nvec > nr_entries) 605 return -EINVAL; 606 607 /* Check for any invalid entries */ 608 for (i = 0; i < nvec; i++) { 609 if (entries[i].entry >= nr_entries) 610 return -EINVAL; /* invalid entry */ 611 for (j = i + 1; j < nvec; j++) { 612 if (entries[i].entry == entries[j].entry) 613 return -EINVAL; /* duplicate entry */ 614 } 615 } 616 WARN_ON(!!dev->msix_enabled); 617 618 /* Check whether driver already requested for MSI irq */ 619 if (dev->msi_enabled) { 620 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. 
" 621 "Device already has an MSI irq assigned\n", 622 pci_name(dev)); 623 return -EINVAL; 624 } 625 status = msix_capability_init(dev, entries, nvec); 626 return status; 627} 628EXPORT_SYMBOL(pci_enable_msix); 629 630static void msix_free_all_irqs(struct pci_dev *dev) 631{ 632 msi_free_irqs(dev); 633} 634 635void pci_disable_msix(struct pci_dev* dev) 636{ 637 if (!pci_msi_enable || !dev || !dev->msix_enabled) 638 return; 639 640 msix_set_enable(dev, 0); 641 pci_intx(dev, 1); /* enable intx */ 642 dev->msix_enabled = 0; 643 644 msix_free_all_irqs(dev); 645} 646EXPORT_SYMBOL(pci_disable_msix); 647 648/** 649 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state 650 * @dev: pointer to the pci_dev data structure of MSI(X) device function 651 * 652 * Being called during hotplug remove, from which the device function 653 * is hot-removed. All previous assigned MSI/MSI-X irqs, if 654 * allocated for this device function, are reclaimed to unused state, 655 * which may be used later on. 
656 **/ 657void msi_remove_pci_irq_vectors(struct pci_dev* dev) 658{ 659 if (!pci_msi_enable || !dev) 660 return; 661 662 if (dev->msi_enabled) 663 msi_free_irqs(dev); 664 665 if (dev->msix_enabled) 666 msix_free_all_irqs(dev); 667} 668 669void pci_no_msi(void) 670{ 671 pci_msi_enable = 0; 672} 673 674void pci_msi_init_pci_dev(struct pci_dev *dev) 675{ 676 INIT_LIST_HEAD(&dev->msi_list); 677} 678 679 680/* Arch hooks */ 681 682int __attribute__ ((weak)) 683arch_msi_check_device(struct pci_dev* dev, int nvec, int type) 684{ 685 return 0; 686} 687 688int __attribute__ ((weak)) 689arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry) 690{ 691 return 0; 692} 693 694int __attribute__ ((weak)) 695arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 696{ 697 struct msi_desc *entry; 698 int ret; 699 700 list_for_each_entry(entry, &dev->msi_list, list) { 701 ret = arch_setup_msi_irq(dev, entry); 702 if (ret) 703 return ret; 704 } 705 706 return 0; 707} 708 709void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq) 710{ 711 return; 712} 713 714void __attribute__ ((weak)) 715arch_teardown_msi_irqs(struct pci_dev *dev) 716{ 717 struct msi_desc *entry; 718 719 list_for_each_entry(entry, &dev->msi_list, list) { 720 if (entry->irq != 0) 721 arch_teardown_msi_irq(entry->irq); 722 } 723} 724