/* msi.c revision a52e2e3513d4beafe8fe8699f1519b021c2d05ba */
/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>

#include <asm/errno.h>
#include <asm/io.h>

#include "pci.h"
#include "msi.h"

/* Global MSI on/off switch; cleared by pci_no_msi() and read by
 * pci_msi_enabled(). */
static int pci_msi_enable = 1;

/* Arch hooks
 *
 * These are weak default implementations; an architecture may override
 * any of them with a strong definition of the same name.
 */

/* Default arch check: no additional restrictions beyond the generic ones. */
int __attribute__ ((weak))
arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	return 0;
}

/* Default per-entry irq setup: does nothing and reports success. */
int __attribute__ ((weak))
arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry)
{
	return 0;
}

/*
 * Default multi-entry setup: call arch_setup_msi_irq() for every
 * msi_desc queued on dev->msi_list, stopping at the first failure.
 * Note: @nvec and @type are unused here; an arch override may need them.
 */
int __attribute__ ((weak))
arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;

	list_for_each_entry(entry, &dev->msi_list, list) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret)
			return ret;
	}

	return 0;
}

/* Default per-irq teardown: nothing to do. */
void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq)
{
	return;
}

/*
 * Default multi-entry teardown: tear down every entry on dev->msi_list
 * that actually got an irq assigned (irq != 0).
 */
void __attribute__ ((weak))
arch_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq != 0)
			arch_teardown_msi_irq(entry->irq);
	}
}

/*
 * Set or clear the MSI enable bit in the MSI capability at config-space
 * offset @pos.  Silently does nothing when @pos is 0 (capability absent).
 */
static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
	u16 control;

	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
}

/* Convenience wrapper: locate the MSI capability, then (dis|en)able it. */
static void msi_set_enable(struct pci_dev *dev, int enable)
{
	__msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
}

/* Set or clear the MSI-X enable bit; no-op if the device lacks MSI-X. */
static void msix_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

/*
 * Build a mask covering 2^x vectors, i.e. (1 << (1 << x)) - 1.
 * @x is the log2-encoded "multiple message" field from the MSI control
 * register, so x == 5 would mean 32 vectors and a full 32-bit mask.
 */
static inline __attribute_const__ u32 msi_mask(unsigned x)
{
	/* Don't shift by >= width of type */
	if (x >= 5)
		return 0xffffffff;
	return (1 << (1 << x)) - 1;
}

/*
 * For an MSI-X entry, read back the vector-control register (result
 * discarded) to flush previously posted MMIO writes to the device.
 * For plain MSI (config-space accesses) nothing needs flushing.
 */
static void msix_flush_writes(struct irq_desc *desc)
{
	struct msi_desc *entry;

	entry = get_irq_desc_msi(desc);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		/* nothing to do */
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
}

/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 *
 * Returns 1 if it succeeded in masking the interrupt and 0 if the device
 * doesn't support MSI masking.
 *
 * For MSI, entry->mask_base holds the *config-space offset* of the mask
 * register (see msi_capability_init()), not an ioremapped address; for
 * MSI-X it is the ioremapped table base.  Updates msi_attrib.masked to
 * record the new state.
 */
static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag)
{
	struct msi_desc *entry;

	entry = get_irq_desc_msi(desc);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			/* mask_base doubles as the config-space offset here */
			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			mask_bits &= ~(mask);
			mask_bits |= flag & mask;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		} else {
			return 0;
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
	entry->msi_attrib.masked = !!flag;
	return 1;
}

/*
 * Read the current MSI/MSI-X message (address + data) for @desc back
 * from the hardware into @msg.  For MSI this reads the capability in
 * config space (32- or 64-bit address layout, per msi_attrib.is_64);
 * for MSI-X it reads the entry from the ioremapped vector table.
 */
void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);
	switch(entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}
/* irq-number convenience wrapper around read_msi_msg_desc(). */
void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct irq_desc *desc = irq_to_desc(irq);

	read_msi_msg_desc(desc, msg);
}

/*
 * Program the MSI/MSI-X message (address + data) for @desc into the
 * hardware, then cache it in entry->msg so it can be replayed later
 * (e.g. by the restore paths below).  MSI goes through config space
 * (32- or 64-bit layout per msi_attrib.is_64); MSI-X writes the
 * ioremapped vector-table entry.
 */
void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
	/* remember what we wrote so the state can be restored later */
	entry->msg = *msg;
}

/* irq-number convenience wrapper around write_msi_msg_desc(). */
void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct irq_desc *desc = irq_to_desc(irq);

	write_msi_msg_desc(desc, msg);
}

/* Mask the MSI/MSI-X interrupt @irq and flush the write if MSI-X. */
void mask_msi_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	msi_set_mask_bits(desc, 1, 1);
	msix_flush_writes(desc);
}

/* Unmask the MSI/MSI-X interrupt @irq and flush the write if MSI-X. */
void unmask_msi_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	msi_set_mask_bits(desc, 1, 0);
	msix_flush_writes(desc);
}

static int msi_free_irqs(struct pci_dev* dev);

/*
 * Allocate a zeroed msi_desc with an initialized (empty) list head.
 * Returns NULL on allocation failure.  Caller owns the entry and is
 * expected to link it onto dev->msi_list.
 */
static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
	if (!entry)
		return NULL;

	INIT_LIST_HEAD(&entry->list);
	entry->irq = 0;
	entry->dev = NULL;

	return entry;
}

/*
 * Toggle legacy INTx for MSI transitions, unless a quirk marked the
 * device as having a broken INTx-disable bit
 * (PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG).
 */
static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
		pci_intx(dev, enable);
}

/*
 * Re-program a device's MSI configuration from the cached software
 * state (entry->msg, mask bits, control flags).  Used by
 * pci_restore_msi_state(); no-op unless dev->msi_enabled is set.
 */
static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = get_irq_msi(dev->irq);
	pos = entry->msi_attrib.pos;

	/* disable INTx and MSI while reprogramming, then re-enable MSI */
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 0);
	write_msi_msg(dev->irq, &entry->msg);
	if (entry->msi_attrib.maskbit) {
		struct irq_desc *desc = irq_to_desc(dev->irq);
		msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask,
				  entry->msi_attrib.masked);
	}

	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	control &= ~PCI_MSI_FLAGS_QSIZE;	/* single message enabled */
	control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}

/*
 * Re-program all of a device's MSI-X vectors (message + mask state)
 * from the cached entries on dev->msi_list, then re-enable MSI-X.
 * No-op unless dev->msix_enabled is set.
 */
static void __pci_restore_msix_state(struct pci_dev *dev)
{
	int pos;
	struct msi_desc *entry;
	u16 control;

	if (!dev->msix_enabled)
		return;

	/* route the table */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 0);

	list_for_each_entry(entry, &dev->msi_list, list) {
		struct irq_desc *desc = irq_to_desc(entry->irq);
		write_msi_msg(entry->irq, &entry->msg);
		msi_set_mask_bits(desc, 1, entry->msi_attrib.masked);
	}

	/* all entries share the same capability position */
	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
	control &= ~PCI_MSIX_FLAGS_MASKALL;
	control |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}

/*
 * Restore whichever of MSI / MSI-X was enabled on @dev from cached
 * state (each helper checks its own enabled flag).
 */
void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of device function with a single
 * MSI irq, regardless of device function is capable of handling
 * multiple messages. A return of zero indicates the successful setup
 * of an entry zero with the new MSI irq or non-zero for otherwise.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, ret;
	u16 control;

	msi_set_enable(dev, 0);	/* Ensure msi is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.masked = 1;
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	entry->dev = dev;
	if (entry->msi_attrib.maskbit) {
		unsigned int base, maskbits, temp;

		/* For MSI, mask_base stores the config-space offset of the
		 * mask register, not an ioremapped address (cf.
		 * msi_set_mask_bits()). */
		base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
		entry->mask_base = (void __iomem *)(long)base;

		/* All MSIs are unmasked by default, Mask them all */
		pci_read_config_dword(dev, base, &maskbits);
		/* mask covering every vector the device advertises */
		temp = msi_mask((control & PCI_MSI_FLAGS_QMASK) >> 1);
		maskbits |= temp;
		pci_write_config_dword(dev, base, maskbits);
		entry->msi_attrib.maskbits_mask = temp;
	}
	list_add_tail(&entry->list, &dev->msi_list);

	/* Configure MSI capability structure */
	ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI);
	if (ret) {
		msi_free_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits	 */
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	dev->irq = entry->irq;
	return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of device function with a
 * single MSI-X irq. A return of zero indicates the successful setup of
 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *entry;
	int pos, i, j, nr_entries, ret;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	/* table offset register: low bits select the BAR (BIR), the rest
	 * is the offset of the vector table within that BAR */
	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start (dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;	/* partial allocation; arch setup decides fate */

		j = entries[i].entry;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.masked = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->dev = dev;
		entry->mask_base = base;	/* shared ioremapped table base */

		list_add_tail(&entry->list, &dev->msi_list);
	}

	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret) {
		/* count how many vectors did get an irq before failing */
		int avail = 0;
		list_for_each_entry(entry, &dev->msi_list, list) {
			if (entry->irq != 0) {
				avail++;
			}
		}

		msi_free_irqs(dev);

		/* If we had some success report the number of irqs
		 * we succeeded in setting up.
		 */
		if (avail == 0)
			avail = ret;
		return avail;
	}

	/* hand the allocated irq numbers back to the caller's array */
	i = 0;
	list_for_each_entry(entry, &dev->msi_list, list) {
		entries[i].vector = entry->irq;
		set_irq_msi(entry->irq, entry);
		i++;
	}
	/* Set MSI-X enabled bits */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 1);
	dev->msix_enabled = 1;

	return 0;
}

/**
 * pci_msi_check_device - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested ?
 * @type: are we checking for MSI or MSI-X ?
 *
 * Look at global flags, the device itself, and its parent busses
 * to determine if MSI/-X are supported for the device. If MSI/-X is
 * supported return 0, else return an error code.
 **/
static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
{
	struct pci_bus *bus;
	int ret;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/*
	 * You can't ask to have 0 or less MSIs configured.
	 *  a) it's stupid ..
	 *  b) the list manipulation code assumes nvec >= 1.
	 */
	if (nvec < 1)
		return -ERANGE;

	/* Any bridge which does NOT route MSI transactions from it's
	 * secondary bus to it's primary bus must set NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller driver
	 * or quirks for specific PCI bridges to be setting NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	ret = arch_msi_check_device(dev, nvec, type);
	if (ret)
		return ret;

	/* the device must actually expose the requested capability */
	if (!pci_find_capability(dev, type))
		return -EINVAL;

	return 0;
}

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of device function with
 * a single MSI irq upon its software driver call to request for
 * MSI mode enabled on its hardware device function. A return of zero
 * indicates the successful setup of an entry zero with the new MSI
 * irq or non-zero for otherwise.
 **/
int pci_enable_msi(struct pci_dev* dev)
{
	int status;

	status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI);
	if (status)
		return status;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether driver already requested for MSI-X irqs */
	if (dev->msix_enabled) {
		dev_info(&dev->dev, "can't enable MSI "
			 "(MSI-X already enabled)\n");
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	return status;
}
EXPORT_SYMBOL(pci_enable_msi);

/*
 * Disable MSI on @dev and restore legacy INTx, leaving the msi_desc
 * entries on dev->msi_list intact (pci_disable_msi() frees them).
 * Also restores dev->irq to the pre-MSI default pin irq.
 */
void pci_msi_shutdown(struct pci_dev* dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	/* Leave the device with its MSI irqs unmasked */
	if (entry->msi_attrib.maskbit) {
		u32 mask = entry->msi_attrib.maskbits_mask;
		struct irq_desc *desc = irq_to_desc(dev->irq);
		msi_set_mask_bits(desc, mask, ~mask);
	}
	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
		return;

	/* Restore dev->irq to its default pin-assertion irq */
	dev->irq = entry->msi_attrib.default_irq;
}

/*
 * Full MSI teardown: shut MSI down, then free the irq(s) and the
 * msi_desc entries (unless the first entry isn't an MSI one).
 */
void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	pci_msi_shutdown(dev);

	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
		return;

	msi_free_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msi);

/*
 * Release every msi_desc on dev->msi_list: verify no handler is still
 * attached, let the arch tear down the irqs, mask MSI-X vectors in the
 * table, unmap the MSI-X table when freeing its last entry, then free
 * the descriptors.  Always returns 0.
 */
static int msi_free_irqs(struct pci_dev* dev)
{
	struct msi_desc *entry, *tmp;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq)
			BUG_ON(irq_has_action(entry->irq));
	}

	arch_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
		if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) {
			/* mask this vector in the hardware table */
			writel(1, entry->mask_base + entry->msi_attrib.entry_nr
				  * PCI_MSIX_ENTRY_SIZE
				  + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

			/* all entries share mask_base; unmap it only once */
			if (list_is_last(&entry->list, &dev->msi_list))
				iounmap(entry->mask_base);
		}
		list_del(&entry->list);
		kfree(entry);
	}

	return 0;
}

/**
 * pci_msix_table_size - return the number of device's MSI-X table entries
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 */
int pci_msix_table_size(struct pci_dev *dev)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	return multi_msix_capable(control);
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of device function with the number
 * of requested irqs upon its software driver call to request for
 * MSI-X mode enabled on its hardware device function. A return of zero
 * indicates the successful configuration of MSI-X capability structure
 * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
 * Or a return of > 0 indicates that driver request is exceeding the number
 * of irqs available. Driver should use the returned value to re-send
 * its request.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, nr_entries;
	int i, j;

	if (!entries)
		return -EINVAL;

	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
	if (status)
		return status;

	nr_entries = pci_msix_table_size(dev);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether driver already requested for MSI irq */
	if (dev->msi_enabled) {
		dev_info(&dev->dev, "can't enable MSI-X "
			 "(MSI IRQ already assigned)\n");
		return -EINVAL;
	}
	status = msix_capability_init(dev, entries, nvec);
	return status;
}
EXPORT_SYMBOL(pci_enable_msix);

/* Free every MSI-X irq/descriptor; thin alias over msi_free_irqs(). */
static void msix_free_all_irqs(struct pci_dev *dev)
{
	msi_free_irqs(dev);
}

/*
 * Disable MSI-X on @dev and restore legacy INTx, leaving descriptors
 * and irqs allocated (pci_disable_msix() frees them).
 */
void pci_msix_shutdown(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	msix_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;
}

/* Full MSI-X teardown: shutdown plus freeing all irqs/descriptors. */
void pci_disable_msix(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	pci_msix_shutdown(dev);

	msix_free_all_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Being called during hotplug remove, from which the device function
 * is hot-removed. All previous assigned MSI/MSI-X irqs, if
 * allocated for this device function, are reclaimed to unused state,
 * which may be used later on.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev)
		return;

	if (dev->msi_enabled)
		msi_free_irqs(dev);

	if (dev->msix_enabled)
		msix_free_all_irqs(dev);
}

/* Globally disable MSI support (e.g. for the "pci=nomsi" boot option). */
void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

/**
 * pci_msi_enabled - is MSI enabled?
 *
 * Returns true if MSI has not been disabled by the command-line option
 * pci=nomsi.
 **/
int pci_msi_enabled(void)
{
	return pci_msi_enable;
}
EXPORT_SYMBOL(pci_msi_enabled);

/* Per-device init: prepare the (empty) list of MSI descriptors. */
void pci_msi_init_pci_dev(struct pci_dev *dev)
{
	INIT_LIST_HEAD(&dev->msi_list);
}