msi.c revision 11df1f05514beaf0269484191007dbc8d47e0e6f
/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>

#include <asm/errno.h>
#include <asm/io.h>

#include "pci.h"
#include "msi.h"

/* Global MSI switch; cleared by pci_no_msi() (e.g. the "pci=nomsi" option). */
static int pci_msi_enable = 1;

/* Arch hooks - weak defaults used when the architecture provides none. */

#ifndef arch_msi_check_device
/* Default arch hook: no architecture-specific restrictions on MSI. */
int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	return 0;
}
#endif

#ifndef arch_setup_msi_irqs
/*
 * Default arch hook: set up an irq for every msi_desc already queued on
 * dev->msi_list, one at a time.  Stops at the first failure and returns
 * that error; irqs set up so far are left for the caller to tear down.
 */
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;

	list_for_each_entry(entry, &dev->msi_list, list) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

#ifndef arch_teardown_msi_irqs
/* Default arch hook: tear down every irq that was actually allocated. */
void arch_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq != 0)
			arch_teardown_msi_irq(entry->irq);
	}
}
#endif

/*
 * Set or clear the MSI enable bit in the MSI capability located at config
 * offset @pos.  A @pos of 0 (capability not found) is a no-op.
 */
static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
	u16 control;

	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
}

/* Enable/disable MSI on @dev, looking up the capability offset first. */
static void msi_set_enable(struct pci_dev *dev, int enable)
{
	__msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
}

/* Enable/disable MSI-X on @dev; no-op when the capability is absent. */
static void msix_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

/*
 * Build a mask covering 2^x vectors, where @x is the log2-encoded
 * multiple-message-capable field from the MSI control register.
 */
static inline __attribute_const__ u32 msi_mask(unsigned x)
{
	/* Don't shift by >= width of type */
	if (x >= 5)
		return 0xffffffff;
	return (1 << (1 << x)) - 1;
}

/*
 * Flush a previous mask/unmask write for an MSI-X vector by reading its
 * vector-control word back from the table; plain MSI needs no flush
 * because its mask lives in (non-posted) config space.
 */
static void msix_flush_writes(struct irq_desc *desc)
{
	struct msi_desc *entry;

	entry = get_irq_desc_msi(desc);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		/* nothing to do */
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
}

/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 *
 * Returns 1 if it succeeded in masking the interrupt and 0 if the device
 * doesn't support MSI masking.
 */
static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag)
{
	struct msi_desc *entry;

	entry = get_irq_desc_msi(desc);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			/* For MSI, mask_base holds the config-space offset
			 * of the mask register, not an ioremapped address
			 * (see msi_capability_init). */
			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			mask_bits &= ~(mask);
			mask_bits |= flag & mask;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		} else {
			return 0;
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
	entry->msi_attrib.masked = !!flag;
	return 1;
}

/*
 * Read the MSI message (address/data pair) currently programmed for the
 * vector behind @desc, from config space (MSI) or the MSI-X table (MSI-X).
 */
void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);
	switch(entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}
/* Read the MSI message for @irq; see read_msi_msg_desc(). */
void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct irq_desc *desc = irq_to_desc(irq);

	read_msi_msg_desc(desc, msg);
}

/*
 * Program the MSI message (address/data pair) for the vector behind
 * @desc into config space (MSI) or the MSI-X table (MSI-X), and cache
 * it in entry->msg so it can be replayed on restore.
 */
void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
	entry->msg = *msg;	/* cached copy used by pci_restore_msi_state() */
}

/* Write the MSI message for @irq; see write_msi_msg_desc(). */
void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct irq_desc *desc = irq_to_desc(irq);

	write_msi_msg_desc(desc, msg);
}

/* irq_chip mask hook: mask the vector, then flush the write (MSI-X). */
void mask_msi_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	msi_set_mask_bits(desc, 1, 1);
	msix_flush_writes(desc);
}

/* irq_chip unmask hook: unmask the vector, then flush the write (MSI-X). */
void unmask_msi_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	msi_set_mask_bits(desc, 1, 0);
	msix_flush_writes(desc);
}

static int msi_free_irqs(struct pci_dev* dev);

/* Allocate a zeroed msi_desc with its list head initialized. */
static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
	if (!entry)
		return NULL;

	INIT_LIST_HEAD(&entry->list);
	entry->irq = 0;
	entry->dev = NULL;

	return entry;
}

/*
 * Toggle INTx around MSI transitions, except on devices whose quirk flag
 * says the INTx disable bit is broken.
 */
static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
		pci_intx(dev, enable);
}

/*
 * Re-program MSI state after a device reset/resume: replay the cached
 * message and mask bits, then re-enable MSI in the control register.
 * No-op unless MSI is currently enabled on @dev.
 */
static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = get_irq_msi(dev->irq);
	pos = entry->msi_attrib.pos;

	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 0);
	write_msi_msg(dev->irq, &entry->msg);
	if (entry->msi_attrib.maskbit) {
		struct irq_desc *desc = irq_to_desc(dev->irq);
		msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask,
				entry->msi_attrib.masked);
	}

	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}

/*
 * Re-program MSI-X state after a device reset/resume: rewrite every
 * table entry from the cached messages and mask state, then re-enable
 * MSI-X.  No-op unless MSI-X is currently enabled on @dev.
 */
static void __pci_restore_msix_state(struct pci_dev *dev)
{
	int pos;
	struct msi_desc *entry;
	u16 control;

	if (!dev->msix_enabled)
		return;

	/* route the table */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 0);

	list_for_each_entry(entry, &dev->msi_list, list) {
		struct irq_desc *desc = irq_to_desc(entry->irq);
		write_msi_msg(entry->irq, &entry->msg);
		msi_set_mask_bits(desc, 1, entry->msi_attrib.masked);
	}

	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
	control &= ~PCI_MSIX_FLAGS_MASKALL;
	control |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}

/* Restore both MSI and MSI-X state (each helper no-ops if not enabled). */
void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
368EXPORT_SYMBOL_GPL(pci_restore_msi_state); 369 370/** 371 * msi_capability_init - configure device's MSI capability structure 372 * @dev: pointer to the pci_dev data structure of MSI device function 373 * 374 * Setup the MSI capability structure of device function with a single 375 * MSI irq, regardless of device function is capable of handling 376 * multiple messages. A return of zero indicates the successful setup 377 * of an entry zero with the new MSI irq or non-zero for otherwise. 378 **/ 379static int msi_capability_init(struct pci_dev *dev) 380{ 381 struct msi_desc *entry; 382 int pos, ret; 383 u16 control; 384 385 msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */ 386 387 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 388 pci_read_config_word(dev, msi_control_reg(pos), &control); 389 /* MSI Entry Initialization */ 390 entry = alloc_msi_entry(); 391 if (!entry) 392 return -ENOMEM; 393 394 entry->msi_attrib.type = PCI_CAP_ID_MSI; 395 entry->msi_attrib.is_64 = is_64bit_address(control); 396 entry->msi_attrib.entry_nr = 0; 397 entry->msi_attrib.maskbit = is_mask_bit_support(control); 398 entry->msi_attrib.masked = 1; 399 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 400 entry->msi_attrib.pos = pos; 401 entry->dev = dev; 402 if (entry->msi_attrib.maskbit) { 403 unsigned int base, maskbits, temp; 404 405 base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64); 406 entry->mask_base = (void __iomem *)(long)base; 407 408 /* All MSIs are unmasked by default, Mask them all */ 409 pci_read_config_dword(dev, base, &maskbits); 410 temp = msi_mask((control & PCI_MSI_FLAGS_QMASK) >> 1); 411 maskbits |= temp; 412 pci_write_config_dword(dev, base, maskbits); 413 entry->msi_attrib.maskbits_mask = temp; 414 } 415 list_add_tail(&entry->list, &dev->msi_list); 416 417 /* Configure MSI capability structure */ 418 ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI); 419 if (ret) { 420 msi_free_irqs(dev); 421 return ret; 422 } 423 424 /* Set MSI 
enabled bits */ 425 pci_intx_for_msi(dev, 0); 426 msi_set_enable(dev, 1); 427 dev->msi_enabled = 1; 428 429 dev->irq = entry->irq; 430 return 0; 431} 432 433/** 434 * msix_capability_init - configure device's MSI-X capability 435 * @dev: pointer to the pci_dev data structure of MSI-X device function 436 * @entries: pointer to an array of struct msix_entry entries 437 * @nvec: number of @entries 438 * 439 * Setup the MSI-X capability structure of device function with a 440 * single MSI-X irq. A return of zero indicates the successful setup of 441 * requested MSI-X entries with allocated irqs or non-zero for otherwise. 442 **/ 443static int msix_capability_init(struct pci_dev *dev, 444 struct msix_entry *entries, int nvec) 445{ 446 struct msi_desc *entry; 447 int pos, i, j, nr_entries, ret; 448 unsigned long phys_addr; 449 u32 table_offset; 450 u16 control; 451 u8 bir; 452 void __iomem *base; 453 454 msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */ 455 456 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 457 /* Request & Map MSI-X table region */ 458 pci_read_config_word(dev, msi_control_reg(pos), &control); 459 nr_entries = multi_msix_capable(control); 460 461 pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); 462 bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); 463 table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; 464 phys_addr = pci_resource_start (dev, bir) + table_offset; 465 base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); 466 if (base == NULL) 467 return -ENOMEM; 468 469 /* MSI-X Table Initialization */ 470 for (i = 0; i < nvec; i++) { 471 entry = alloc_msi_entry(); 472 if (!entry) 473 break; 474 475 j = entries[i].entry; 476 entry->msi_attrib.type = PCI_CAP_ID_MSIX; 477 entry->msi_attrib.is_64 = 1; 478 entry->msi_attrib.entry_nr = j; 479 entry->msi_attrib.maskbit = 1; 480 entry->msi_attrib.masked = 1; 481 entry->msi_attrib.default_irq = dev->irq; 482 entry->msi_attrib.pos = pos; 483 entry->dev = dev; 484 
entry->mask_base = base; 485 486 list_add_tail(&entry->list, &dev->msi_list); 487 } 488 489 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); 490 if (ret) { 491 int avail = 0; 492 list_for_each_entry(entry, &dev->msi_list, list) { 493 if (entry->irq != 0) { 494 avail++; 495 } 496 } 497 498 msi_free_irqs(dev); 499 500 /* If we had some success report the number of irqs 501 * we succeeded in setting up. 502 */ 503 if (avail == 0) 504 avail = ret; 505 return avail; 506 } 507 508 i = 0; 509 list_for_each_entry(entry, &dev->msi_list, list) { 510 entries[i].vector = entry->irq; 511 set_irq_msi(entry->irq, entry); 512 i++; 513 } 514 /* Set MSI-X enabled bits */ 515 pci_intx_for_msi(dev, 0); 516 msix_set_enable(dev, 1); 517 dev->msix_enabled = 1; 518 519 return 0; 520} 521 522/** 523 * pci_msi_check_device - check whether MSI may be enabled on a device 524 * @dev: pointer to the pci_dev data structure of MSI device function 525 * @nvec: how many MSIs have been requested ? 526 * @type: are we checking for MSI or MSI-X ? 527 * 528 * Look at global flags, the device itself, and its parent busses 529 * to determine if MSI/-X are supported for the device. If MSI/-X is 530 * supported return 0, else return an error code. 531 **/ 532static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type) 533{ 534 struct pci_bus *bus; 535 int ret; 536 537 /* MSI must be globally enabled and supported by the device */ 538 if (!pci_msi_enable || !dev || dev->no_msi) 539 return -EINVAL; 540 541 /* 542 * You can't ask to have 0 or less MSIs configured. 543 * a) it's stupid .. 544 * b) the list manipulation code assumes nvec >= 1. 545 */ 546 if (nvec < 1) 547 return -ERANGE; 548 549 /* Any bridge which does NOT route MSI transactions from it's 550 * secondary bus to it's primary bus must set NO_MSI flag on 551 * the secondary pci_bus. 552 * We expect only arch-specific PCI host bus controller driver 553 * or quirks for specific PCI bridges to be setting NO_MSI. 
554 */ 555 for (bus = dev->bus; bus; bus = bus->parent) 556 if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) 557 return -EINVAL; 558 559 ret = arch_msi_check_device(dev, nvec, type); 560 if (ret) 561 return ret; 562 563 if (!pci_find_capability(dev, type)) 564 return -EINVAL; 565 566 return 0; 567} 568 569/** 570 * pci_enable_msi - configure device's MSI capability structure 571 * @dev: pointer to the pci_dev data structure of MSI device function 572 * 573 * Setup the MSI capability structure of device function with 574 * a single MSI irq upon its software driver call to request for 575 * MSI mode enabled on its hardware device function. A return of zero 576 * indicates the successful setup of an entry zero with the new MSI 577 * irq or non-zero for otherwise. 578 **/ 579int pci_enable_msi(struct pci_dev* dev) 580{ 581 int status; 582 583 status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI); 584 if (status) 585 return status; 586 587 WARN_ON(!!dev->msi_enabled); 588 589 /* Check whether driver already requested for MSI-X irqs */ 590 if (dev->msix_enabled) { 591 dev_info(&dev->dev, "can't enable MSI " 592 "(MSI-X already enabled)\n"); 593 return -EINVAL; 594 } 595 status = msi_capability_init(dev); 596 return status; 597} 598EXPORT_SYMBOL(pci_enable_msi); 599 600void pci_msi_shutdown(struct pci_dev* dev) 601{ 602 struct msi_desc *entry; 603 604 if (!pci_msi_enable || !dev || !dev->msi_enabled) 605 return; 606 607 msi_set_enable(dev, 0); 608 pci_intx_for_msi(dev, 1); 609 dev->msi_enabled = 0; 610 611 BUG_ON(list_empty(&dev->msi_list)); 612 entry = list_entry(dev->msi_list.next, struct msi_desc, list); 613 /* Return the the pci reset with msi irqs unmasked */ 614 if (entry->msi_attrib.maskbit) { 615 u32 mask = entry->msi_attrib.maskbits_mask; 616 struct irq_desc *desc = irq_to_desc(dev->irq); 617 msi_set_mask_bits(desc, mask, ~mask); 618 } 619 if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) 620 return; 621 622 /* Restore dev->irq to its default pin-assertion 
irq */ 623 dev->irq = entry->msi_attrib.default_irq; 624} 625void pci_disable_msi(struct pci_dev* dev) 626{ 627 struct msi_desc *entry; 628 629 if (!pci_msi_enable || !dev || !dev->msi_enabled) 630 return; 631 632 pci_msi_shutdown(dev); 633 634 entry = list_entry(dev->msi_list.next, struct msi_desc, list); 635 if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) 636 return; 637 638 msi_free_irqs(dev); 639} 640EXPORT_SYMBOL(pci_disable_msi); 641 642static int msi_free_irqs(struct pci_dev* dev) 643{ 644 struct msi_desc *entry, *tmp; 645 646 list_for_each_entry(entry, &dev->msi_list, list) { 647 if (entry->irq) 648 BUG_ON(irq_has_action(entry->irq)); 649 } 650 651 arch_teardown_msi_irqs(dev); 652 653 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { 654 if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) { 655 writel(1, entry->mask_base + entry->msi_attrib.entry_nr 656 * PCI_MSIX_ENTRY_SIZE 657 + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); 658 659 if (list_is_last(&entry->list, &dev->msi_list)) 660 iounmap(entry->mask_base); 661 } 662 list_del(&entry->list); 663 kfree(entry); 664 } 665 666 return 0; 667} 668 669/** 670 * pci_msix_table_size - return the number of device's MSI-X table entries 671 * @dev: pointer to the pci_dev data structure of MSI-X device function 672 */ 673int pci_msix_table_size(struct pci_dev *dev) 674{ 675 int pos; 676 u16 control; 677 678 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 679 if (!pos) 680 return 0; 681 682 pci_read_config_word(dev, msi_control_reg(pos), &control); 683 return multi_msix_capable(control); 684} 685 686/** 687 * pci_enable_msix - configure device's MSI-X capability structure 688 * @dev: pointer to the pci_dev data structure of MSI-X device function 689 * @entries: pointer to an array of MSI-X entries 690 * @nvec: number of MSI-X irqs requested for allocation by device driver 691 * 692 * Setup the MSI-X capability structure of device function with the number 693 * of requested irqs upon its software driver call 
to request for 694 * MSI-X mode enabled on its hardware device function. A return of zero 695 * indicates the successful configuration of MSI-X capability structure 696 * with new allocated MSI-X irqs. A return of < 0 indicates a failure. 697 * Or a return of > 0 indicates that driver request is exceeding the number 698 * of irqs available. Driver should use the returned value to re-send 699 * its request. 700 **/ 701int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) 702{ 703 int status, nr_entries; 704 int i, j; 705 706 if (!entries) 707 return -EINVAL; 708 709 status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX); 710 if (status) 711 return status; 712 713 nr_entries = pci_msix_table_size(dev); 714 if (nvec > nr_entries) 715 return -EINVAL; 716 717 /* Check for any invalid entries */ 718 for (i = 0; i < nvec; i++) { 719 if (entries[i].entry >= nr_entries) 720 return -EINVAL; /* invalid entry */ 721 for (j = i + 1; j < nvec; j++) { 722 if (entries[i].entry == entries[j].entry) 723 return -EINVAL; /* duplicate entry */ 724 } 725 } 726 WARN_ON(!!dev->msix_enabled); 727 728 /* Check whether driver already requested for MSI irq */ 729 if (dev->msi_enabled) { 730 dev_info(&dev->dev, "can't enable MSI-X " 731 "(MSI IRQ already assigned)\n"); 732 return -EINVAL; 733 } 734 status = msix_capability_init(dev, entries, nvec); 735 return status; 736} 737EXPORT_SYMBOL(pci_enable_msix); 738 739static void msix_free_all_irqs(struct pci_dev *dev) 740{ 741 msi_free_irqs(dev); 742} 743 744void pci_msix_shutdown(struct pci_dev* dev) 745{ 746 if (!pci_msi_enable || !dev || !dev->msix_enabled) 747 return; 748 749 msix_set_enable(dev, 0); 750 pci_intx_for_msi(dev, 1); 751 dev->msix_enabled = 0; 752} 753void pci_disable_msix(struct pci_dev* dev) 754{ 755 if (!pci_msi_enable || !dev || !dev->msix_enabled) 756 return; 757 758 pci_msix_shutdown(dev); 759 760 msix_free_all_irqs(dev); 761} 762EXPORT_SYMBOL(pci_disable_msix); 763 764/** 765 * 
msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state 766 * @dev: pointer to the pci_dev data structure of MSI(X) device function 767 * 768 * Being called during hotplug remove, from which the device function 769 * is hot-removed. All previous assigned MSI/MSI-X irqs, if 770 * allocated for this device function, are reclaimed to unused state, 771 * which may be used later on. 772 **/ 773void msi_remove_pci_irq_vectors(struct pci_dev* dev) 774{ 775 if (!pci_msi_enable || !dev) 776 return; 777 778 if (dev->msi_enabled) 779 msi_free_irqs(dev); 780 781 if (dev->msix_enabled) 782 msix_free_all_irqs(dev); 783} 784 785void pci_no_msi(void) 786{ 787 pci_msi_enable = 0; 788} 789 790/** 791 * pci_msi_enabled - is MSI enabled? 792 * 793 * Returns true if MSI has not been disabled by the command-line option 794 * pci=nomsi. 795 **/ 796int pci_msi_enabled(void) 797{ 798 return pci_msi_enable; 799} 800EXPORT_SYMBOL(pci_msi_enabled); 801 802void pci_msi_init_pci_dev(struct pci_dev *dev) 803{ 804 INIT_LIST_HEAD(&dev->msi_list); 805} 806