/* msi.c revision 379f5327a86f7822a51ec7d088a085167724df75 */
/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>

#include <asm/errno.h>
#include <asm/io.h>

#include "pci.h"
#include "msi.h"

/* Global MSI on/off switch; cleared by pci_no_msi() (boot option). */
static int pci_msi_enable = 1;

/* Arch hooks — weak defaults, overridable by the architecture. */

/* Default: no arch-specific restriction on enabling MSI/MSI-X. */
#ifndef arch_msi_check_device
int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	return 0;
}
#endif

/*
 * Default: set up one irq per descriptor on dev's msi_list by calling
 * the single-irq arch hook for each.  A positive return from the hook
 * is mapped to -ENOSPC (no vectors available).
 */
#ifndef arch_setup_msi_irqs
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;

	list_for_each_entry(entry, &dev->msi_list, list) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret < 0)
			return ret;
		if (ret > 0)
			return -ENOSPC;
	}

	return 0;
}
#endif

/* Default: tear down every irq that was actually assigned (irq != 0). */
#ifndef arch_teardown_msi_irqs
void arch_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq != 0)
			arch_teardown_msi_irq(entry->irq);
	}
}
#endif

/*
 * Set or clear the MSI enable bit in the capability at config offset
 * @pos.  No-op when @pos is 0 (capability not present).
 */
static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
	u16 control;

	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
}

/* Look up the MSI capability and toggle its enable bit. */
static void msi_set_enable(struct pci_dev *dev, int enable)
{
	__msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
}

/* Toggle the MSI-X enable bit; no-op if the capability is absent. */
static void msix_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

/*
 * Bitmask covering 2^x MSI vectors: (1 << (1 << x)) - 1, saturated to
 * all-ones for x >= 5 since 2^5 = 32 already fills the u32.
 */
static inline __attribute_const__ u32 msi_mask(unsigned x)
{
	/* Don't shift by >= width of type */
	if (x >= 5)
		return 0xffffffff;
	return (1 << (1 << x)) - 1;
}

/*
 * For MSI-X, read back the entry's vector-control word so that any
 * posted MMIO writes to the table reach the device.  No-op for MSI.
 */
static void msix_flush_writes(struct irq_desc *desc)
{
	struct msi_desc *entry;

	entry = get_irq_desc_msi(desc);
	BUG_ON(!entry);
	if (entry->msi_attrib.is_msix) {
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		readl(entry->mask_base + offset);
	}
}

/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 *
 * Returns 1 if it succeeded in masking the interrupt and 0 if the device
 * doesn't support MSI masking.
 */
static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag)
{
	struct msi_desc *entry;

	entry = get_irq_desc_msi(desc);
	BUG_ON(!entry);
	if (entry->msi_attrib.is_msix) {
		/* MSI-X: write the per-entry vector-control word, then
		 * read it back to flush the posted write. */
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		readl(entry->mask_base + offset);
	} else {
		int pos;
		u32 mask_bits;

		if (!entry->msi_attrib.maskbit)
			return 0;

		/* For MSI, mask_base holds the config-space offset of the
		 * mask register (stored as an integer in the pointer —
		 * see msi_capability_init). */
		pos = (long)entry->mask_base;
		pci_read_config_dword(entry->dev, pos, &mask_bits);
		mask_bits &= ~mask;
		mask_bits |= flag & mask;
		pci_write_config_dword(entry->dev, pos, mask_bits);
	}
	entry->msi_attrib.masked = !!flag;
	return 1;
}

/*
 * Read back the currently programmed MSI/MSI-X message (address + data)
 * into @msg, from the MSI-X table (MMIO) or the MSI capability
 * registers (config space).
 */
void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);
	if (entry->msi_attrib.is_msix) {
		void __iomem *base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
	} else {
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			/* 64-bit capability: upper address dword exists and
			 * shifts the data register location. */
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
	}
}

/* Convenience wrapper: resolve @irq to its descriptor, then read. */
void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct irq_desc *desc = irq_to_desc(irq);

	read_msi_msg_desc(desc, msg);
}

/*
 * Program @msg into the device (MSI-X table entry or MSI capability
 * registers) and cache it in entry->msg for later restore.
 */
void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);
	if (entry->msi_attrib.is_msix) {
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
	} else {
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
	}
	/* Keep a copy so __pci_restore_msi*_state() can re-program it. */
	entry->msg = *msg;
}

/* Convenience wrapper: resolve @irq to its descriptor, then write. */
void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct irq_desc *desc = irq_to_desc(irq);

	write_msi_msg_desc(desc, msg);
}

/* irq_chip mask hook: set the mask bit and flush (MSI-X) the write. */
void mask_msi_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	msi_set_mask_bits(desc, 1, 1);
	msix_flush_writes(desc);
}

/* irq_chip unmask hook: clear the mask bit and flush (MSI-X) the write. */
void unmask_msi_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	msi_set_mask_bits(desc, 1, 0);
	msix_flush_writes(desc);
}

static int msi_free_irqs(struct pci_dev *dev);

/* Allocate a zeroed msi_desc tied to @dev; NULL on allocation failure. */
static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
{
	struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->list);
	desc->dev = dev;

	return desc;
}

/*
 * Toggle INTx for MSI transitions, unless the device is quirked as
 * broken (PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG).
 */
static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
		pci_intx(dev, enable);
}

/*
 * Re-program the cached MSI message, mask state and control word into
 * the device (used when restoring device state).  No-op unless MSI is
 * marked enabled in @dev.
 */
static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = get_irq_msi(dev->irq);
	pos = entry->msi_attrib.pos;

	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 0);
	write_msi_msg(dev->irq, &entry->msg);
	if (entry->msi_attrib.maskbit) {
		struct irq_desc *desc = irq_to_desc(dev->irq);
		msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask,
				  entry->msi_attrib.masked);
	}

	/* Restore single-message mode and the enable bit. */
	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}

/*
 * Re-program every cached MSI-X table entry and mask bit, then
 * re-enable MSI-X.  No-op unless MSI-X is marked enabled in @dev.
 */
static void __pci_restore_msix_state(struct pci_dev *dev)
{
	int pos;
	struct msi_desc *entry;
	u16 control;

	if (!dev->msix_enabled)
		return;

	/* route the table */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 0);

	list_for_each_entry(entry, &dev->msi_list, list) {
		struct irq_desc *desc = irq_to_desc(entry->irq);
		write_msi_msg(entry->irq, &entry->msg);
		msi_set_mask_bits(desc, 1, entry->msi_attrib.masked);
	}

	/* All entries share the same capability position; take the first. */
	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
	control &= ~PCI_MSIX_FLAGS_MASKALL;
	control |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}

/* Restore both MSI and MSI-X state; each helper no-ops if not enabled. */
void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of device function with a single
 * MSI irq, regardless of device function is capable of handling
 * multiple messages.
 * A return of zero indicates the successful setup
 * of an entry zero with the new MSI irq or non-zero for otherwise.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, ret;
	u16 control;

	msi_set_enable(dev, 0);	/* Ensure msi is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry(dev);
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.is_msix = 0;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.masked = 1;
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	if (entry->msi_attrib.maskbit) {
		unsigned int base, maskbits, temp;

		/* For MSI the mask register lives in config space; stash
		 * its offset in mask_base (consumed by msi_set_mask_bits). */
		base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
		entry->mask_base = (void __iomem *)(long)base;

		/* All MSIs are unmasked by default; mask them all. */
		pci_read_config_dword(dev, base, &maskbits);
		/* QMASK encodes log2 of the multiple-message capability;
		 * expand it to a bitmask over the supported vectors. */
		temp = msi_mask((control & PCI_MSI_FLAGS_QMASK) >> 1);
		maskbits |= temp;
		pci_write_config_dword(dev, base, maskbits);
		entry->msi_attrib.maskbits_mask = temp;
	}
	list_add_tail(&entry->list, &dev->msi_list);

	/* Configure MSI capability structure */
	ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI);
	if (ret) {
		msi_free_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits */
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	dev->irq = entry->irq;
	return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of device function with a
 * single MSI-X irq. A return of zero indicates the successful setup of
 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *entry;
	int pos, i, j, nr_entries, ret;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	/* The low bits of the table offset register hold the BAR index
	 * (BIR); the rest is the offset of the table within that BAR. */
	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start(dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry(dev);
		if (!entry)
			break;	/* allocated fewer than requested */

		j = entries[i].entry;
		entry->msi_attrib.is_msix = 1;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.masked = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->mask_base = base;

		list_add_tail(&entry->list, &dev->msi_list);
	}

	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret < 0) {
		/* If we had some success report the number of irqs
		 * we succeeded in setting up. */
		int avail = 0;
		list_for_each_entry(entry, &dev->msi_list, list) {
			if (entry->irq != 0) {
				avail++;
			}
		}

		if (avail != 0)
			ret = avail;
	}

	if (ret) {
		/* Either a hard failure (ret < 0) or a partial success
		 * (ret > 0): free everything and report to the caller. */
		msi_free_irqs(dev);
		return ret;
	}

	/* Hand the assigned vectors back to the caller. */
	i = 0;
	list_for_each_entry(entry, &dev->msi_list, list) {
		entries[i].vector = entry->irq;
		set_irq_msi(entry->irq, entry);
		i++;
	}
	/* Set MSI-X enabled bits */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 1);
	dev->msix_enabled = 1;

	return 0;
}

/**
 * pci_msi_check_device - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested ?
 * @type: are we checking for MSI or MSI-X ?
 *
 * Look at global flags, the device itself, and its parent busses
 * to determine if MSI/-X are supported for the device. If MSI/-X is
 * supported return 0, else return an error code.
 **/
static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	struct pci_bus *bus;
	int ret;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/*
	 * You can't ask to have 0 or less MSIs configured.
	 *  a) it's stupid ..
	 *  b) the list manipulation code assumes nvec >= 1.
	 */
	if (nvec < 1)
		return -ERANGE;

	/* Any bridge which does NOT route MSI transactions from it's
	 * secondary bus to it's primary bus must set NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller driver
	 * or quirks for specific PCI bridges to be setting NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	ret = arch_msi_check_device(dev, nvec, type);
	if (ret)
		return ret;

	if (!pci_find_capability(dev, type))
		return -EINVAL;

	return 0;
}

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of device function with
 * a single MSI irq upon its software driver call to request for
 * MSI mode enabled on its hardware device function. A return of zero
 * indicates the successful setup of an entry zero with the new MSI
 * irq or non-zero for otherwise.
 **/
int pci_enable_msi(struct pci_dev *dev)
{
	int status;

	status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI);
	if (status)
		return status;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether driver already requested for MSI-X irqs */
	if (dev->msix_enabled) {
		dev_info(&dev->dev, "can't enable MSI "
			"(MSI-X already enabled)\n");
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	return status;
}
EXPORT_SYMBOL(pci_enable_msi);

/*
 * Disable MSI on @dev without freeing the irq resources: clear the
 * enable bit, restore INTx routing, and unmask the vectors so the
 * device is back in its power-on interrupt state.
 */
void pci_msi_shutdown(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	/* Return the device with its MSI irqs unmasked */
	if (entry->msi_attrib.maskbit) {
		u32 mask = entry->msi_attrib.maskbits_mask;
		struct irq_desc *desc = irq_to_desc(dev->irq);
		msi_set_mask_bits(desc, mask, ~mask);
	}
	if (entry->msi_attrib.is_msix)
		return;

	/* Restore dev->irq to its default pin-assertion irq */
	dev->irq = entry->msi_attrib.default_irq;
}

/* Full MSI disable: shutdown, then free the irq descriptors. */
void pci_disable_msi(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	pci_msi_shutdown(dev);

	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	if (entry->msi_attrib.is_msix)
		return;

	msi_free_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msi);

/*
 * Tear down every MSI/MSI-X descriptor on dev's msi_list: release the
 * arch irqs, mask MSI-X table entries, unmap the table when its last
 * user goes away, and free the descriptors.  Always returns 0.
 */
static int msi_free_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry, *tmp;

	/* No irq may still have a handler attached at this point. */
	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq)
			BUG_ON(irq_has_action(entry->irq));
	}

	arch_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
		if (entry->msi_attrib.is_msix) {
			/* Mask the table entry before tearing it down. */
			writel(1, entry->mask_base + entry->msi_attrib.entry_nr
				  * PCI_MSIX_ENTRY_SIZE
				  + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

			/* All entries share one mapping; unmap it with
			 * the last entry. */
			if (list_is_last(&entry->list, &dev->msi_list))
				iounmap(entry->mask_base);
		}
		list_del(&entry->list);
		kfree(entry);
	}

	return 0;
}

/**
 * pci_msix_table_size - return the number of device's MSI-X table entries
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 */
int pci_msix_table_size(struct pci_dev *dev)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	return multi_msix_capable(control);
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of device function with the number
 * of requested irqs upon its software driver call to request for
 * MSI-X mode enabled on its hardware device
 * function. A return of zero
 * indicates the successful configuration of MSI-X capability structure
 * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
 * Or a return of > 0 indicates that driver request is exceeding the number
 * of irqs available. Driver should use the returned value to re-send
 * its request.
 **/
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
	int status, nr_entries;
	int i, j;

	if (!entries)
		return -EINVAL;

	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
	if (status)
		return status;

	nr_entries = pci_msix_table_size(dev);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether driver already requested for MSI irq */
	if (dev->msi_enabled) {
		dev_info(&dev->dev, "can't enable MSI-X "
			"(MSI IRQ already assigned)\n");
		return -EINVAL;
	}
	status = msix_capability_init(dev, entries, nvec);
	return status;
}
EXPORT_SYMBOL(pci_enable_msix);

/* Free all MSI-X irqs; shares the MSI teardown path. */
static void msix_free_all_irqs(struct pci_dev *dev)
{
	msi_free_irqs(dev);
}

/*
 * Disable MSI-X on @dev without freeing the irq resources: clear the
 * enable bit and restore INTx routing.
 */
void pci_msix_shutdown(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	msix_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;
}

/* Full MSI-X disable: shutdown, then free all irq descriptors. */
void pci_disable_msix(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	pci_msix_shutdown(dev);

	msix_free_all_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev:
 * pointer to the pci_dev data structure of MSI(X) device function
 *
 * Being called during hotplug remove, from which the device function
 * is hot-removed. All previous assigned MSI/MSI-X irqs, if
 * allocated for this device function, are reclaimed to unused state,
 * which may be used later on.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev)
		return;

	if (dev->msi_enabled)
		msi_free_irqs(dev);

	if (dev->msix_enabled)
		msix_free_all_irqs(dev);
}

/* Globally disable MSI (set from the "pci=nomsi" boot option path). */
void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

/**
 * pci_msi_enabled - is MSI enabled?
 *
 * Returns true if MSI has not been disabled by the command-line option
 * pci=nomsi.
 **/
int pci_msi_enabled(void)
{
	return pci_msi_enable;
}
EXPORT_SYMBOL(pci_msi_enabled);

/* Per-device init: start with an empty MSI descriptor list. */
void pci_msi_init_pci_dev(struct pci_dev *dev)
{
	INIT_LIST_HEAD(&dev->msi_list);
}