msi.c revision 07ae95f988a34465bdcb384bfa73c03424fe2312
/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>

#include <asm/errno.h>
#include <asm/io.h>

#include "pci.h"
#include "msi.h"

/* Global MSI switch; cleared by pci_no_msi() and reported by
 * pci_msi_enabled(). */
static int pci_msi_enable = 1;

/* Arch hooks: weak default implementations that architectures may
 * override with real vector allocation/teardown. */

/* Default arch check: impose no restrictions beyond the generic ones. */
int __attribute__ ((weak))
arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	return 0;
}

/* Default per-entry setup: no-op success. */
int __attribute__ ((weak))
arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry)
{
	return 0;
}

/* Default multi-vector setup: set up each descriptor on dev->msi_list
 * in turn; stop at the first failure and propagate its error code.
 * Note: @nvec and @type are unused here — the list drives the loop. */
int __attribute__ ((weak))
arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;

	list_for_each_entry(entry, &dev->msi_list, list) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret)
			return ret;
	}

	return 0;
}

/* Default per-irq teardown: no-op. */
void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq)
{
	return;
}

/* Default multi-vector teardown: tear down every entry that actually
 * had an irq assigned (irq == 0 means setup never reached it). */
void __attribute__ ((weak))
arch_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq != 0)
			arch_teardown_msi_irq(entry->irq);
	}
}

/* Set or clear the MSI enable bit in the capability at config offset
 * @pos; @pos == 0 (capability not present) is a silent no-op. */
static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
	u16 control;

	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
}

/* Locate the MSI capability and toggle its enable bit. */
static void msi_set_enable(struct pci_dev *dev, int enable)
{
	__msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
}

/* Locate the MSI-X capability and toggle its enable bit. */
static void msix_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

/* For an MSI-X vector, read back its vector-control word so that a
 * preceding MMIO mask/unmask write is flushed to the device.  Plain
 * MSI needs nothing: its mask lives in config space, where writes are
 * not posted. */
static void msix_flush_writes(struct irq_desc *desc)
{
	struct msi_desc *entry;

	entry = get_irq_desc_msi(desc);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		/* nothing to do */
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
}

/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 *
 * Returns 1 if it succeeded in masking the interrupt and 0 if the device
 * doesn't support MSI masking.
 */
static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag)
{
	struct msi_desc *entry;

	entry = get_irq_desc_msi(desc);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			/* For MSI, mask_base holds the config-space offset
			 * of the mask register, not an ioremapped address
			 * (see msi_capability_init). */
			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			mask_bits &= ~(mask);
			mask_bits |= flag & mask;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		} else {
			return 0;
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		/* read back to flush the posted MMIO write */
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
	/* Record the new mask state in the descriptor. */
	entry->msi_attrib.masked = !!flag;
	return 1;
}

/* Read the currently programmed MSI/MSI-X message (address + data) of
 * the interrupt described by @desc into @msg: from config space for
 * MSI, from the MMIO vector table for MSI-X. */
void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);
	switch(entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}
/* Convenience wrapper: resolve @irq to its irq_desc and read the
 * programmed MSI message. */
void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct irq_desc *desc = irq_to_desc(irq);

	read_msi_msg_desc(desc, msg);
}

/* Program the MSI/MSI-X message (address + data) for @desc — config
 * space for MSI, MMIO vector table for MSI-X — and cache a shadow copy
 * in the descriptor for later restore. */
void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
	/* Shadow copy used by __pci_restore_msi{,x}_state(). */
	entry->msg = *msg;
}

/* Convenience wrapper: resolve @irq to its irq_desc and write @msg. */
void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct irq_desc *desc = irq_to_desc(irq);

	write_msi_msg_desc(desc, msg);
}

/* irq_chip mask hook: set the per-vector mask bit, then flush the
 * write (MSI-X only needs the flush). */
void mask_msi_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	msi_set_mask_bits(desc, 1, 1);
	msix_flush_writes(desc);
}

/* irq_chip unmask hook: clear the per-vector mask bit, then flush. */
void unmask_msi_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	msi_set_mask_bits(desc, 1, 0);
	msix_flush_writes(desc);
}

static int msi_free_irqs(struct pci_dev* dev);

/* Allocate a zeroed msi_desc with an initialised (empty) list head.
 * Returns NULL on allocation failure. */
static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
	if (!entry)
		return NULL;

	INIT_LIST_HEAD(&entry->list);
	entry->irq = 0;
	entry->dev = NULL;

	return entry;
}

/* Toggle INTx assertion, unless the device is quirked as unable to
 * honour the INTx disable bit. */
static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
		pci_intx(dev, enable);
}

/* Reprogram MSI state (message, mask bits, enable bit) from the cached
 * descriptor, e.g. after reset/resume; no-op unless dev->msi_enabled. */
static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = get_irq_msi(dev->irq);
	pos = entry->msi_attrib.pos;

	/* Disable while reprogramming, then re-enable below. */
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 0);
	write_msi_msg(dev->irq, &entry->msg);
	if (entry->msi_attrib.maskbit) {
		struct irq_desc *desc = irq_to_desc(dev->irq);
		msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask,
				  entry->msi_attrib.masked);
	}

	/* Clear the multiple-message (QSIZE) field and set enable. */
	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}

/* Reprogram every MSI-X vector from its cached message/mask state and
 * re-enable MSI-X; no-op unless dev->msix_enabled. */
static void __pci_restore_msix_state(struct pci_dev *dev)
{
	int pos;
	struct msi_desc *entry;
	u16 control;

	if (!dev->msix_enabled)
		return;

	/* route the table */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 0);

	list_for_each_entry(entry, &dev->msi_list, list) {
		struct irq_desc *desc = irq_to_desc(entry->irq);
		write_msi_msg(entry->irq, &entry->msg);
		msi_set_mask_bits(desc, 1, entry->msi_attrib.masked);
	}

	/* All entries share one capability; take pos from the first. */
	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
	control &= ~PCI_MSIX_FLAGS_MASKALL;
	control |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}

/* Restore both MSI and MSI-X state; each helper checks its own
 * enabled flag, so at most one does real work. */
void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
368EXPORT_SYMBOL_GPL(pci_restore_msi_state); 369 370/** 371 * msi_capability_init - configure device's MSI capability structure 372 * @dev: pointer to the pci_dev data structure of MSI device function 373 * 374 * Setup the MSI capability structure of device function with a single 375 * MSI irq, regardless of device function is capable of handling 376 * multiple messages. A return of zero indicates the successful setup 377 * of an entry zero with the new MSI irq or non-zero for otherwise. 378 **/ 379static int msi_capability_init(struct pci_dev *dev) 380{ 381 struct msi_desc *entry; 382 int pos, ret; 383 u16 control; 384 385 msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */ 386 387 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 388 pci_read_config_word(dev, msi_control_reg(pos), &control); 389 /* MSI Entry Initialization */ 390 entry = alloc_msi_entry(); 391 if (!entry) 392 return -ENOMEM; 393 394 entry->msi_attrib.type = PCI_CAP_ID_MSI; 395 entry->msi_attrib.is_64 = is_64bit_address(control); 396 entry->msi_attrib.entry_nr = 0; 397 entry->msi_attrib.maskbit = is_mask_bit_support(control); 398 entry->msi_attrib.masked = 1; 399 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 400 entry->msi_attrib.pos = pos; 401 if (entry->msi_attrib.maskbit) { 402 entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos, 403 entry->msi_attrib.is_64); 404 } 405 entry->dev = dev; 406 if (entry->msi_attrib.maskbit) { 407 unsigned int maskbits, temp; 408 /* All MSIs are unmasked by default, Mask them all */ 409 pci_read_config_dword(dev, 410 msi_mask_bits_reg(pos, entry->msi_attrib.is_64), 411 &maskbits); 412 temp = (1 << multi_msi_capable(control)); 413 temp = ((temp - 1) & ~temp); 414 maskbits |= temp; 415 pci_write_config_dword(dev, entry->msi_attrib.is_64, maskbits); 416 entry->msi_attrib.maskbits_mask = temp; 417 } 418 list_add_tail(&entry->list, &dev->msi_list); 419 420 /* Configure MSI capability structure */ 421 ret = 
arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI); 422 if (ret) { 423 msi_free_irqs(dev); 424 return ret; 425 } 426 427 /* Set MSI enabled bits */ 428 pci_intx_for_msi(dev, 0); 429 msi_set_enable(dev, 1); 430 dev->msi_enabled = 1; 431 432 dev->irq = entry->irq; 433 return 0; 434} 435 436/** 437 * msix_capability_init - configure device's MSI-X capability 438 * @dev: pointer to the pci_dev data structure of MSI-X device function 439 * @entries: pointer to an array of struct msix_entry entries 440 * @nvec: number of @entries 441 * 442 * Setup the MSI-X capability structure of device function with a 443 * single MSI-X irq. A return of zero indicates the successful setup of 444 * requested MSI-X entries with allocated irqs or non-zero for otherwise. 445 **/ 446static int msix_capability_init(struct pci_dev *dev, 447 struct msix_entry *entries, int nvec) 448{ 449 struct msi_desc *entry; 450 int pos, i, j, nr_entries, ret; 451 unsigned long phys_addr; 452 u32 table_offset; 453 u16 control; 454 u8 bir; 455 void __iomem *base; 456 457 msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */ 458 459 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 460 /* Request & Map MSI-X table region */ 461 pci_read_config_word(dev, msi_control_reg(pos), &control); 462 nr_entries = multi_msix_capable(control); 463 464 pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); 465 bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); 466 table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; 467 phys_addr = pci_resource_start (dev, bir) + table_offset; 468 base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); 469 if (base == NULL) 470 return -ENOMEM; 471 472 /* MSI-X Table Initialization */ 473 for (i = 0; i < nvec; i++) { 474 entry = alloc_msi_entry(); 475 if (!entry) 476 break; 477 478 j = entries[i].entry; 479 entry->msi_attrib.type = PCI_CAP_ID_MSIX; 480 entry->msi_attrib.is_64 = 1; 481 entry->msi_attrib.entry_nr = j; 482 entry->msi_attrib.maskbit = 1; 483 
entry->msi_attrib.masked = 1; 484 entry->msi_attrib.default_irq = dev->irq; 485 entry->msi_attrib.pos = pos; 486 entry->dev = dev; 487 entry->mask_base = base; 488 489 list_add_tail(&entry->list, &dev->msi_list); 490 } 491 492 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); 493 if (ret) { 494 int avail = 0; 495 list_for_each_entry(entry, &dev->msi_list, list) { 496 if (entry->irq != 0) { 497 avail++; 498 } 499 } 500 501 msi_free_irqs(dev); 502 503 /* If we had some success report the number of irqs 504 * we succeeded in setting up. 505 */ 506 if (avail == 0) 507 avail = ret; 508 return avail; 509 } 510 511 i = 0; 512 list_for_each_entry(entry, &dev->msi_list, list) { 513 entries[i].vector = entry->irq; 514 set_irq_msi(entry->irq, entry); 515 i++; 516 } 517 /* Set MSI-X enabled bits */ 518 pci_intx_for_msi(dev, 0); 519 msix_set_enable(dev, 1); 520 dev->msix_enabled = 1; 521 522 return 0; 523} 524 525/** 526 * pci_msi_check_device - check whether MSI may be enabled on a device 527 * @dev: pointer to the pci_dev data structure of MSI device function 528 * @nvec: how many MSIs have been requested ? 529 * @type: are we checking for MSI or MSI-X ? 530 * 531 * Look at global flags, the device itself, and its parent busses 532 * to determine if MSI/-X are supported for the device. If MSI/-X is 533 * supported return 0, else return an error code. 534 **/ 535static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type) 536{ 537 struct pci_bus *bus; 538 int ret; 539 540 /* MSI must be globally enabled and supported by the device */ 541 if (!pci_msi_enable || !dev || dev->no_msi) 542 return -EINVAL; 543 544 /* 545 * You can't ask to have 0 or less MSIs configured. 546 * a) it's stupid .. 547 * b) the list manipulation code assumes nvec >= 1. 548 */ 549 if (nvec < 1) 550 return -ERANGE; 551 552 /* Any bridge which does NOT route MSI transactions from it's 553 * secondary bus to it's primary bus must set NO_MSI flag on 554 * the secondary pci_bus. 
555 * We expect only arch-specific PCI host bus controller driver 556 * or quirks for specific PCI bridges to be setting NO_MSI. 557 */ 558 for (bus = dev->bus; bus; bus = bus->parent) 559 if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) 560 return -EINVAL; 561 562 ret = arch_msi_check_device(dev, nvec, type); 563 if (ret) 564 return ret; 565 566 if (!pci_find_capability(dev, type)) 567 return -EINVAL; 568 569 return 0; 570} 571 572/** 573 * pci_enable_msi - configure device's MSI capability structure 574 * @dev: pointer to the pci_dev data structure of MSI device function 575 * 576 * Setup the MSI capability structure of device function with 577 * a single MSI irq upon its software driver call to request for 578 * MSI mode enabled on its hardware device function. A return of zero 579 * indicates the successful setup of an entry zero with the new MSI 580 * irq or non-zero for otherwise. 581 **/ 582int pci_enable_msi(struct pci_dev* dev) 583{ 584 int status; 585 586 status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI); 587 if (status) 588 return status; 589 590 WARN_ON(!!dev->msi_enabled); 591 592 /* Check whether driver already requested for MSI-X irqs */ 593 if (dev->msix_enabled) { 594 dev_info(&dev->dev, "can't enable MSI " 595 "(MSI-X already enabled)\n"); 596 return -EINVAL; 597 } 598 status = msi_capability_init(dev); 599 return status; 600} 601EXPORT_SYMBOL(pci_enable_msi); 602 603void pci_msi_shutdown(struct pci_dev* dev) 604{ 605 struct msi_desc *entry; 606 607 if (!pci_msi_enable || !dev || !dev->msi_enabled) 608 return; 609 610 msi_set_enable(dev, 0); 611 pci_intx_for_msi(dev, 1); 612 dev->msi_enabled = 0; 613 614 BUG_ON(list_empty(&dev->msi_list)); 615 entry = list_entry(dev->msi_list.next, struct msi_desc, list); 616 /* Return the the pci reset with msi irqs unmasked */ 617 if (entry->msi_attrib.maskbit) { 618 u32 mask = entry->msi_attrib.maskbits_mask; 619 struct irq_desc *desc = irq_to_desc(dev->irq); 620 msi_set_mask_bits(desc, mask, ~mask); 621 } 622 if 
(!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) 623 return; 624 625 /* Restore dev->irq to its default pin-assertion irq */ 626 dev->irq = entry->msi_attrib.default_irq; 627} 628void pci_disable_msi(struct pci_dev* dev) 629{ 630 struct msi_desc *entry; 631 632 if (!pci_msi_enable || !dev || !dev->msi_enabled) 633 return; 634 635 pci_msi_shutdown(dev); 636 637 entry = list_entry(dev->msi_list.next, struct msi_desc, list); 638 if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) 639 return; 640 641 msi_free_irqs(dev); 642} 643EXPORT_SYMBOL(pci_disable_msi); 644 645static int msi_free_irqs(struct pci_dev* dev) 646{ 647 struct msi_desc *entry, *tmp; 648 649 list_for_each_entry(entry, &dev->msi_list, list) { 650 if (entry->irq) 651 BUG_ON(irq_has_action(entry->irq)); 652 } 653 654 arch_teardown_msi_irqs(dev); 655 656 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { 657 if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) { 658 writel(1, entry->mask_base + entry->msi_attrib.entry_nr 659 * PCI_MSIX_ENTRY_SIZE 660 + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); 661 662 if (list_is_last(&entry->list, &dev->msi_list)) 663 iounmap(entry->mask_base); 664 } 665 list_del(&entry->list); 666 kfree(entry); 667 } 668 669 return 0; 670} 671 672/** 673 * pci_enable_msix - configure device's MSI-X capability structure 674 * @dev: pointer to the pci_dev data structure of MSI-X device function 675 * @entries: pointer to an array of MSI-X entries 676 * @nvec: number of MSI-X irqs requested for allocation by device driver 677 * 678 * Setup the MSI-X capability structure of device function with the number 679 * of requested irqs upon its software driver call to request for 680 * MSI-X mode enabled on its hardware device function. A return of zero 681 * indicates the successful configuration of MSI-X capability structure 682 * with new allocated MSI-X irqs. A return of < 0 indicates a failure. 
683 * Or a return of > 0 indicates that driver request is exceeding the number 684 * of irqs available. Driver should use the returned value to re-send 685 * its request. 686 **/ 687int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) 688{ 689 int status, pos, nr_entries; 690 int i, j; 691 u16 control; 692 693 if (!entries) 694 return -EINVAL; 695 696 status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX); 697 if (status) 698 return status; 699 700 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 701 pci_read_config_word(dev, msi_control_reg(pos), &control); 702 nr_entries = multi_msix_capable(control); 703 if (nvec > nr_entries) 704 return -EINVAL; 705 706 /* Check for any invalid entries */ 707 for (i = 0; i < nvec; i++) { 708 if (entries[i].entry >= nr_entries) 709 return -EINVAL; /* invalid entry */ 710 for (j = i + 1; j < nvec; j++) { 711 if (entries[i].entry == entries[j].entry) 712 return -EINVAL; /* duplicate entry */ 713 } 714 } 715 WARN_ON(!!dev->msix_enabled); 716 717 /* Check whether driver already requested for MSI irq */ 718 if (dev->msi_enabled) { 719 dev_info(&dev->dev, "can't enable MSI-X " 720 "(MSI IRQ already assigned)\n"); 721 return -EINVAL; 722 } 723 status = msix_capability_init(dev, entries, nvec); 724 return status; 725} 726EXPORT_SYMBOL(pci_enable_msix); 727 728static void msix_free_all_irqs(struct pci_dev *dev) 729{ 730 msi_free_irqs(dev); 731} 732 733void pci_msix_shutdown(struct pci_dev* dev) 734{ 735 if (!pci_msi_enable || !dev || !dev->msix_enabled) 736 return; 737 738 msix_set_enable(dev, 0); 739 pci_intx_for_msi(dev, 1); 740 dev->msix_enabled = 0; 741} 742void pci_disable_msix(struct pci_dev* dev) 743{ 744 if (!pci_msi_enable || !dev || !dev->msix_enabled) 745 return; 746 747 pci_msix_shutdown(dev); 748 749 msix_free_all_irqs(dev); 750} 751EXPORT_SYMBOL(pci_disable_msix); 752 753/** 754 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state 755 * @dev: pointer to the pci_dev data structure 
of MSI(X) device function 756 * 757 * Being called during hotplug remove, from which the device function 758 * is hot-removed. All previous assigned MSI/MSI-X irqs, if 759 * allocated for this device function, are reclaimed to unused state, 760 * which may be used later on. 761 **/ 762void msi_remove_pci_irq_vectors(struct pci_dev* dev) 763{ 764 if (!pci_msi_enable || !dev) 765 return; 766 767 if (dev->msi_enabled) 768 msi_free_irqs(dev); 769 770 if (dev->msix_enabled) 771 msix_free_all_irqs(dev); 772} 773 774void pci_no_msi(void) 775{ 776 pci_msi_enable = 0; 777} 778 779/** 780 * pci_msi_enabled - is MSI enabled? 781 * 782 * Returns true if MSI has not been disabled by the command-line option 783 * pci=nomsi. 784 **/ 785int pci_msi_enabled(void) 786{ 787 return pci_msi_enable; 788} 789EXPORT_SYMBOL(pci_msi_enabled); 790 791void pci_msi_init_pci_dev(struct pci_dev *dev) 792{ 793 INIT_LIST_HEAD(&dev->msi_list); 794} 795