/* msi.c revision f5f2b13129a6541debf8851bae843cbbf48298b7 */
1/* 2 * File: msi.c 3 * Purpose: PCI Message Signaled Interrupt (MSI) 4 * 5 * Copyright (C) 2003-2004 Intel 6 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) 7 */ 8 9#include <linux/err.h> 10#include <linux/mm.h> 11#include <linux/irq.h> 12#include <linux/interrupt.h> 13#include <linux/init.h> 14#include <linux/ioport.h> 15#include <linux/smp_lock.h> 16#include <linux/pci.h> 17#include <linux/proc_fs.h> 18#include <linux/msi.h> 19 20#include <asm/errno.h> 21#include <asm/io.h> 22#include <asm/smp.h> 23 24#include "pci.h" 25#include "msi.h" 26 27static struct kmem_cache* msi_cachep; 28 29static int pci_msi_enable = 1; 30 31static int msi_cache_init(void) 32{ 33 msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc), 34 0, SLAB_HWCACHE_ALIGN, NULL, NULL); 35 if (!msi_cachep) 36 return -ENOMEM; 37 38 return 0; 39} 40 41static void msi_set_mask_bit(unsigned int irq, int flag) 42{ 43 struct msi_desc *entry; 44 45 entry = get_irq_msi(irq); 46 BUG_ON(!entry || !entry->dev); 47 switch (entry->msi_attrib.type) { 48 case PCI_CAP_ID_MSI: 49 if (entry->msi_attrib.maskbit) { 50 int pos; 51 u32 mask_bits; 52 53 pos = (long)entry->mask_base; 54 pci_read_config_dword(entry->dev, pos, &mask_bits); 55 mask_bits &= ~(1); 56 mask_bits |= flag; 57 pci_write_config_dword(entry->dev, pos, mask_bits); 58 } 59 break; 60 case PCI_CAP_ID_MSIX: 61 { 62 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + 63 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET; 64 writel(flag, entry->mask_base + offset); 65 break; 66 } 67 default: 68 BUG(); 69 break; 70 } 71} 72 73void read_msi_msg(unsigned int irq, struct msi_msg *msg) 74{ 75 struct msi_desc *entry = get_irq_msi(irq); 76 switch(entry->msi_attrib.type) { 77 case PCI_CAP_ID_MSI: 78 { 79 struct pci_dev *dev = entry->dev; 80 int pos = entry->msi_attrib.pos; 81 u16 data; 82 83 pci_read_config_dword(dev, msi_lower_address_reg(pos), 84 &msg->address_lo); 85 if (entry->msi_attrib.is_64) { 86 pci_read_config_dword(dev, 
msi_upper_address_reg(pos), 87 &msg->address_hi); 88 pci_read_config_word(dev, msi_data_reg(pos, 1), &data); 89 } else { 90 msg->address_hi = 0; 91 pci_read_config_word(dev, msi_data_reg(pos, 1), &data); 92 } 93 msg->data = data; 94 break; 95 } 96 case PCI_CAP_ID_MSIX: 97 { 98 void __iomem *base; 99 base = entry->mask_base + 100 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; 101 102 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); 103 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); 104 msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET); 105 break; 106 } 107 default: 108 BUG(); 109 } 110} 111 112void write_msi_msg(unsigned int irq, struct msi_msg *msg) 113{ 114 struct msi_desc *entry = get_irq_msi(irq); 115 switch (entry->msi_attrib.type) { 116 case PCI_CAP_ID_MSI: 117 { 118 struct pci_dev *dev = entry->dev; 119 int pos = entry->msi_attrib.pos; 120 121 pci_write_config_dword(dev, msi_lower_address_reg(pos), 122 msg->address_lo); 123 if (entry->msi_attrib.is_64) { 124 pci_write_config_dword(dev, msi_upper_address_reg(pos), 125 msg->address_hi); 126 pci_write_config_word(dev, msi_data_reg(pos, 1), 127 msg->data); 128 } else { 129 pci_write_config_word(dev, msi_data_reg(pos, 0), 130 msg->data); 131 } 132 break; 133 } 134 case PCI_CAP_ID_MSIX: 135 { 136 void __iomem *base; 137 base = entry->mask_base + 138 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; 139 140 writel(msg->address_lo, 141 base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); 142 writel(msg->address_hi, 143 base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); 144 writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET); 145 break; 146 } 147 default: 148 BUG(); 149 } 150} 151 152void mask_msi_irq(unsigned int irq) 153{ 154 msi_set_mask_bit(irq, 1); 155} 156 157void unmask_msi_irq(unsigned int irq) 158{ 159 msi_set_mask_bit(irq, 0); 160} 161 162static int msi_free_irq(struct pci_dev* dev, int irq); 163 164static int msi_init(void) 165{ 166 static int status = -ENOMEM; 167 168 if (!status) 
169 return status; 170 171 status = msi_cache_init(); 172 if (status < 0) { 173 pci_msi_enable = 0; 174 printk(KERN_WARNING "PCI: MSI cache init failed\n"); 175 return status; 176 } 177 178 return status; 179} 180 181static struct msi_desc* alloc_msi_entry(void) 182{ 183 struct msi_desc *entry; 184 185 entry = kmem_cache_zalloc(msi_cachep, GFP_KERNEL); 186 if (!entry) 187 return NULL; 188 189 entry->link.tail = entry->link.head = 0; /* single message */ 190 entry->dev = NULL; 191 192 return entry; 193} 194 195static void enable_msi_mode(struct pci_dev *dev, int pos, int type) 196{ 197 u16 control; 198 199 pci_read_config_word(dev, msi_control_reg(pos), &control); 200 if (type == PCI_CAP_ID_MSI) { 201 /* Set enabled bits to single MSI & enable MSI_enable bit */ 202 msi_enable(control, 1); 203 pci_write_config_word(dev, msi_control_reg(pos), control); 204 dev->msi_enabled = 1; 205 } else { 206 msix_enable(control); 207 pci_write_config_word(dev, msi_control_reg(pos), control); 208 dev->msix_enabled = 1; 209 } 210 211 pci_intx(dev, 0); /* disable intx */ 212} 213 214static void disable_msi_mode(struct pci_dev *dev, int pos, int type) 215{ 216 u16 control; 217 218 pci_read_config_word(dev, msi_control_reg(pos), &control); 219 if (type == PCI_CAP_ID_MSI) { 220 /* Set enabled bits to single MSI & enable MSI_enable bit */ 221 msi_disable(control); 222 pci_write_config_word(dev, msi_control_reg(pos), control); 223 dev->msi_enabled = 0; 224 } else { 225 msix_disable(control); 226 pci_write_config_word(dev, msi_control_reg(pos), control); 227 dev->msix_enabled = 0; 228 } 229 230 pci_intx(dev, 1); /* enable intx */ 231} 232 233#ifdef CONFIG_PM 234static int __pci_save_msi_state(struct pci_dev *dev) 235{ 236 int pos, i = 0; 237 u16 control; 238 struct pci_cap_saved_state *save_state; 239 u32 *cap; 240 241 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 242 if (pos <= 0 || dev->no_msi) 243 return 0; 244 245 pci_read_config_word(dev, msi_control_reg(pos), &control); 246 if 
(!(control & PCI_MSI_FLAGS_ENABLE)) 247 return 0; 248 249 save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5, 250 GFP_KERNEL); 251 if (!save_state) { 252 printk(KERN_ERR "Out of memory in pci_save_msi_state\n"); 253 return -ENOMEM; 254 } 255 cap = &save_state->data[0]; 256 257 pci_read_config_dword(dev, pos, &cap[i++]); 258 control = cap[0] >> 16; 259 pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]); 260 if (control & PCI_MSI_FLAGS_64BIT) { 261 pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]); 262 pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]); 263 } else 264 pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]); 265 if (control & PCI_MSI_FLAGS_MASKBIT) 266 pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]); 267 save_state->cap_nr = PCI_CAP_ID_MSI; 268 pci_add_saved_cap(dev, save_state); 269 return 0; 270} 271 272static void __pci_restore_msi_state(struct pci_dev *dev) 273{ 274 int i = 0, pos; 275 u16 control; 276 struct pci_cap_saved_state *save_state; 277 u32 *cap; 278 279 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI); 280 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 281 if (!save_state || pos <= 0) 282 return; 283 cap = &save_state->data[0]; 284 285 control = cap[i++] >> 16; 286 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]); 287 if (control & PCI_MSI_FLAGS_64BIT) { 288 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]); 289 pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]); 290 } else 291 pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]); 292 if (control & PCI_MSI_FLAGS_MASKBIT) 293 pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]); 294 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); 295 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); 296 pci_remove_saved_cap(save_state); 297 kfree(save_state); 298} 299 300static int __pci_save_msix_state(struct pci_dev *dev) 301{ 302 int pos; 303 int irq, head, tail 
= 0; 304 u16 control; 305 struct pci_cap_saved_state *save_state; 306 307 if (!dev->msix_enabled) 308 return 0; 309 310 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 311 if (pos <= 0 || dev->no_msi) 312 return 0; 313 314 /* save the capability */ 315 pci_read_config_word(dev, msi_control_reg(pos), &control); 316 if (!(control & PCI_MSIX_FLAGS_ENABLE)) 317 return 0; 318 save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16), 319 GFP_KERNEL); 320 if (!save_state) { 321 printk(KERN_ERR "Out of memory in pci_save_msix_state\n"); 322 return -ENOMEM; 323 } 324 *((u16 *)&save_state->data[0]) = control; 325 326 /* save the table */ 327 irq = head = dev->first_msi_irq; 328 while (head != tail) { 329 struct msi_desc *entry; 330 331 entry = get_irq_msi(irq); 332 read_msi_msg(irq, &entry->msg_save); 333 334 tail = entry->link.tail; 335 irq = tail; 336 } 337 338 save_state->cap_nr = PCI_CAP_ID_MSIX; 339 pci_add_saved_cap(dev, save_state); 340 return 0; 341} 342 343int pci_save_msi_state(struct pci_dev *dev) 344{ 345 int rc; 346 347 rc = __pci_save_msi_state(dev); 348 if (rc) 349 return rc; 350 351 rc = __pci_save_msix_state(dev); 352 353 return rc; 354} 355 356static void __pci_restore_msix_state(struct pci_dev *dev) 357{ 358 u16 save; 359 int pos; 360 int irq, head, tail = 0; 361 struct msi_desc *entry; 362 struct pci_cap_saved_state *save_state; 363 364 if (!dev->msix_enabled) 365 return; 366 367 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX); 368 if (!save_state) 369 return; 370 save = *((u16 *)&save_state->data[0]); 371 pci_remove_saved_cap(save_state); 372 kfree(save_state); 373 374 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 375 if (pos <= 0) 376 return; 377 378 /* route the table */ 379 irq = head = dev->first_msi_irq; 380 while (head != tail) { 381 entry = get_irq_msi(irq); 382 write_msi_msg(irq, &entry->msg_save); 383 384 tail = entry->link.tail; 385 irq = tail; 386 } 387 388 pci_write_config_word(dev, msi_control_reg(pos), save); 389 
enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); 390} 391 392void pci_restore_msi_state(struct pci_dev *dev) 393{ 394 __pci_restore_msi_state(dev); 395 __pci_restore_msix_state(dev); 396} 397#endif /* CONFIG_PM */ 398 399/** 400 * msi_capability_init - configure device's MSI capability structure 401 * @dev: pointer to the pci_dev data structure of MSI device function 402 * 403 * Setup the MSI capability structure of device function with a single 404 * MSI irq, regardless of device function is capable of handling 405 * multiple messages. A return of zero indicates the successful setup 406 * of an entry zero with the new MSI irq or non-zero for otherwise. 407 **/ 408static int msi_capability_init(struct pci_dev *dev) 409{ 410 struct msi_desc *entry; 411 int pos, irq; 412 u16 control; 413 414 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 415 pci_read_config_word(dev, msi_control_reg(pos), &control); 416 /* MSI Entry Initialization */ 417 entry = alloc_msi_entry(); 418 if (!entry) 419 return -ENOMEM; 420 421 entry->msi_attrib.type = PCI_CAP_ID_MSI; 422 entry->msi_attrib.is_64 = is_64bit_address(control); 423 entry->msi_attrib.entry_nr = 0; 424 entry->msi_attrib.maskbit = is_mask_bit_support(control); 425 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 426 entry->msi_attrib.pos = pos; 427 if (is_mask_bit_support(control)) { 428 entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos, 429 is_64bit_address(control)); 430 } 431 entry->dev = dev; 432 if (entry->msi_attrib.maskbit) { 433 unsigned int maskbits, temp; 434 /* All MSIs are unmasked by default, Mask them all */ 435 pci_read_config_dword(dev, 436 msi_mask_bits_reg(pos, is_64bit_address(control)), 437 &maskbits); 438 temp = (1 << multi_msi_capable(control)); 439 temp = ((temp - 1) & ~temp); 440 maskbits |= temp; 441 pci_write_config_dword(dev, 442 msi_mask_bits_reg(pos, is_64bit_address(control)), 443 maskbits); 444 } 445 /* Configure MSI capability structure */ 446 irq = arch_setup_msi_irq(dev, 
entry); 447 if (irq < 0) { 448 kmem_cache_free(msi_cachep, entry); 449 return irq; 450 } 451 entry->link.head = irq; 452 entry->link.tail = irq; 453 dev->first_msi_irq = irq; 454 set_irq_msi(irq, entry); 455 456 /* Set MSI enabled bits */ 457 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); 458 459 dev->irq = irq; 460 return 0; 461} 462 463/** 464 * msix_capability_init - configure device's MSI-X capability 465 * @dev: pointer to the pci_dev data structure of MSI-X device function 466 * @entries: pointer to an array of struct msix_entry entries 467 * @nvec: number of @entries 468 * 469 * Setup the MSI-X capability structure of device function with a 470 * single MSI-X irq. A return of zero indicates the successful setup of 471 * requested MSI-X entries with allocated irqs or non-zero for otherwise. 472 **/ 473static int msix_capability_init(struct pci_dev *dev, 474 struct msix_entry *entries, int nvec) 475{ 476 struct msi_desc *head = NULL, *tail = NULL, *entry = NULL; 477 int irq, pos, i, j, nr_entries, temp = 0; 478 unsigned long phys_addr; 479 u32 table_offset; 480 u16 control; 481 u8 bir; 482 void __iomem *base; 483 484 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 485 /* Request & Map MSI-X table region */ 486 pci_read_config_word(dev, msi_control_reg(pos), &control); 487 nr_entries = multi_msix_capable(control); 488 489 pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); 490 bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); 491 table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; 492 phys_addr = pci_resource_start (dev, bir) + table_offset; 493 base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); 494 if (base == NULL) 495 return -ENOMEM; 496 497 /* MSI-X Table Initialization */ 498 for (i = 0; i < nvec; i++) { 499 entry = alloc_msi_entry(); 500 if (!entry) 501 break; 502 503 j = entries[i].entry; 504 entry->msi_attrib.type = PCI_CAP_ID_MSIX; 505 entry->msi_attrib.is_64 = 1; 506 entry->msi_attrib.entry_nr = j; 507 entry->msi_attrib.maskbit 
= 1; 508 entry->msi_attrib.default_irq = dev->irq; 509 entry->msi_attrib.pos = pos; 510 entry->dev = dev; 511 entry->mask_base = base; 512 513 /* Configure MSI-X capability structure */ 514 irq = arch_setup_msi_irq(dev, entry); 515 if (irq < 0) { 516 kmem_cache_free(msi_cachep, entry); 517 break; 518 } 519 entries[i].vector = irq; 520 if (!head) { 521 entry->link.head = irq; 522 entry->link.tail = irq; 523 head = entry; 524 } else { 525 entry->link.head = temp; 526 entry->link.tail = tail->link.tail; 527 tail->link.tail = irq; 528 head->link.head = irq; 529 } 530 temp = irq; 531 tail = entry; 532 533 set_irq_msi(irq, entry); 534 } 535 if (i != nvec) { 536 int avail = i - 1; 537 i--; 538 for (; i >= 0; i--) { 539 irq = (entries + i)->vector; 540 msi_free_irq(dev, irq); 541 (entries + i)->vector = 0; 542 } 543 /* If we had some success report the number of irqs 544 * we succeeded in setting up. 545 */ 546 if (avail <= 0) 547 avail = -EBUSY; 548 return avail; 549 } 550 dev->first_msi_irq = entries[0].vector; 551 /* Set MSI-X enabled bits */ 552 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); 553 554 return 0; 555} 556 557/** 558 * pci_msi_supported - check whether MSI may be enabled on device 559 * @dev: pointer to the pci_dev data structure of MSI device function 560 * 561 * Look at global flags, the device itself, and its parent busses 562 * to return 0 if MSI are supported for the device. 563 **/ 564static 565int pci_msi_supported(struct pci_dev * dev) 566{ 567 struct pci_bus *bus; 568 569 /* MSI must be globally enabled and supported by the device */ 570 if (!pci_msi_enable || !dev || dev->no_msi) 571 return -EINVAL; 572 573 /* Any bridge which does NOT route MSI transactions from it's 574 * secondary bus to it's primary bus must set NO_MSI flag on 575 * the secondary pci_bus. 576 * We expect only arch-specific PCI host bus controller driver 577 * or quirks for specific PCI bridges to be setting NO_MSI. 
578 */ 579 for (bus = dev->bus; bus; bus = bus->parent) 580 if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) 581 return -EINVAL; 582 583 return 0; 584} 585 586/** 587 * pci_enable_msi - configure device's MSI capability structure 588 * @dev: pointer to the pci_dev data structure of MSI device function 589 * 590 * Setup the MSI capability structure of device function with 591 * a single MSI irq upon its software driver call to request for 592 * MSI mode enabled on its hardware device function. A return of zero 593 * indicates the successful setup of an entry zero with the new MSI 594 * irq or non-zero for otherwise. 595 **/ 596int pci_enable_msi(struct pci_dev* dev) 597{ 598 int pos, status; 599 600 if (pci_msi_supported(dev) < 0) 601 return -EINVAL; 602 603 status = msi_init(); 604 if (status < 0) 605 return status; 606 607 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 608 if (!pos) 609 return -EINVAL; 610 611 WARN_ON(!!dev->msi_enabled); 612 613 /* Check whether driver already requested for MSI-X irqs */ 614 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 615 if (pos > 0 && dev->msix_enabled) { 616 printk(KERN_INFO "PCI: %s: Can't enable MSI. 
" 617 "Device already has MSI-X enabled\n", 618 pci_name(dev)); 619 return -EINVAL; 620 } 621 status = msi_capability_init(dev); 622 return status; 623} 624 625void pci_disable_msi(struct pci_dev* dev) 626{ 627 struct msi_desc *entry; 628 int pos, default_irq; 629 u16 control; 630 631 if (!pci_msi_enable) 632 return; 633 if (!dev) 634 return; 635 636 if (!dev->msi_enabled) 637 return; 638 639 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 640 if (!pos) 641 return; 642 643 pci_read_config_word(dev, msi_control_reg(pos), &control); 644 if (!(control & PCI_MSI_FLAGS_ENABLE)) 645 return; 646 647 648 disable_msi_mode(dev, pos, PCI_CAP_ID_MSI); 649 650 entry = get_irq_msi(dev->first_msi_irq); 651 if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) { 652 return; 653 } 654 if (irq_has_action(dev->first_msi_irq)) { 655 printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without " 656 "free_irq() on MSI irq %d\n", 657 pci_name(dev), dev->first_msi_irq); 658 BUG_ON(irq_has_action(dev->first_msi_irq)); 659 } else { 660 default_irq = entry->msi_attrib.default_irq; 661 msi_free_irq(dev, dev->first_msi_irq); 662 663 /* Restore dev->irq to its default pin-assertion irq */ 664 dev->irq = default_irq; 665 } 666 dev->first_msi_irq = 0; 667} 668 669static int msi_free_irq(struct pci_dev* dev, int irq) 670{ 671 struct msi_desc *entry; 672 int head, entry_nr, type; 673 void __iomem *base; 674 675 entry = get_irq_msi(irq); 676 if (!entry || entry->dev != dev) { 677 return -EINVAL; 678 } 679 type = entry->msi_attrib.type; 680 entry_nr = entry->msi_attrib.entry_nr; 681 head = entry->link.head; 682 base = entry->mask_base; 683 get_irq_msi(entry->link.head)->link.tail = entry->link.tail; 684 get_irq_msi(entry->link.tail)->link.head = entry->link.head; 685 686 arch_teardown_msi_irq(irq); 687 kmem_cache_free(msi_cachep, entry); 688 689 if (type == PCI_CAP_ID_MSIX) { 690 writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE + 691 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); 692 693 if 
(head == irq) 694 iounmap(base); 695 } 696 697 return 0; 698} 699 700/** 701 * pci_enable_msix - configure device's MSI-X capability structure 702 * @dev: pointer to the pci_dev data structure of MSI-X device function 703 * @entries: pointer to an array of MSI-X entries 704 * @nvec: number of MSI-X irqs requested for allocation by device driver 705 * 706 * Setup the MSI-X capability structure of device function with the number 707 * of requested irqs upon its software driver call to request for 708 * MSI-X mode enabled on its hardware device function. A return of zero 709 * indicates the successful configuration of MSI-X capability structure 710 * with new allocated MSI-X irqs. A return of < 0 indicates a failure. 711 * Or a return of > 0 indicates that driver request is exceeding the number 712 * of irqs available. Driver should use the returned value to re-send 713 * its request. 714 **/ 715int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) 716{ 717 int status, pos, nr_entries; 718 int i, j; 719 u16 control; 720 721 if (!entries || pci_msi_supported(dev) < 0) 722 return -EINVAL; 723 724 status = msi_init(); 725 if (status < 0) 726 return status; 727 728 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 729 if (!pos) 730 return -EINVAL; 731 732 pci_read_config_word(dev, msi_control_reg(pos), &control); 733 nr_entries = multi_msix_capable(control); 734 if (nvec > nr_entries) 735 return -EINVAL; 736 737 /* Check for any invalid entries */ 738 for (i = 0; i < nvec; i++) { 739 if (entries[i].entry >= nr_entries) 740 return -EINVAL; /* invalid entry */ 741 for (j = i + 1; j < nvec; j++) { 742 if (entries[i].entry == entries[j].entry) 743 return -EINVAL; /* duplicate entry */ 744 } 745 } 746 WARN_ON(!!dev->msix_enabled); 747 748 /* Check whether driver already requested for MSI irq */ 749 if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 && 750 dev->msi_enabled) { 751 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. 
" 752 "Device already has an MSI irq assigned\n", 753 pci_name(dev)); 754 return -EINVAL; 755 } 756 status = msix_capability_init(dev, entries, nvec); 757 return status; 758} 759 760void pci_disable_msix(struct pci_dev* dev) 761{ 762 int irq, head, tail = 0, warning = 0; 763 int pos; 764 u16 control; 765 766 if (!pci_msi_enable) 767 return; 768 if (!dev) 769 return; 770 771 if (!dev->msix_enabled) 772 return; 773 774 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 775 if (!pos) 776 return; 777 778 pci_read_config_word(dev, msi_control_reg(pos), &control); 779 if (!(control & PCI_MSIX_FLAGS_ENABLE)) 780 return; 781 782 disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); 783 784 irq = head = dev->first_msi_irq; 785 while (head != tail) { 786 tail = get_irq_msi(irq)->link.tail; 787 if (irq_has_action(irq)) 788 warning = 1; 789 else if (irq != head) /* Release MSI-X irq */ 790 msi_free_irq(dev, irq); 791 irq = tail; 792 } 793 msi_free_irq(dev, irq); 794 if (warning) { 795 printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without " 796 "free_irq() on all MSI-X irqs\n", 797 pci_name(dev)); 798 BUG_ON(warning > 0); 799 } 800 dev->first_msi_irq = 0; 801} 802 803/** 804 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state 805 * @dev: pointer to the pci_dev data structure of MSI(X) device function 806 * 807 * Being called during hotplug remove, from which the device function 808 * is hot-removed. All previous assigned MSI/MSI-X irqs, if 809 * allocated for this device function, are reclaimed to unused state, 810 * which may be used later on. 
811 **/ 812void msi_remove_pci_irq_vectors(struct pci_dev* dev) 813{ 814 if (!pci_msi_enable || !dev) 815 return; 816 817 if (dev->msi_enabled) { 818 if (irq_has_action(dev->first_msi_irq)) { 819 printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() " 820 "called without free_irq() on MSI irq %d\n", 821 pci_name(dev), dev->first_msi_irq); 822 BUG_ON(irq_has_action(dev->first_msi_irq)); 823 } else /* Release MSI irq assigned to this device */ 824 msi_free_irq(dev, dev->first_msi_irq); 825 } 826 if (dev->msix_enabled) { 827 int irq, head, tail = 0, warning = 0; 828 void __iomem *base = NULL; 829 830 irq = head = dev->first_msi_irq; 831 while (head != tail) { 832 tail = get_irq_msi(irq)->link.tail; 833 base = get_irq_msi(irq)->mask_base; 834 if (irq_has_action(irq)) 835 warning = 1; 836 else if (irq != head) /* Release MSI-X irq */ 837 msi_free_irq(dev, irq); 838 irq = tail; 839 } 840 msi_free_irq(dev, irq); 841 if (warning) { 842 iounmap(base); 843 printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() " 844 "called without free_irq() on all MSI-X irqs\n", 845 pci_name(dev)); 846 BUG_ON(warning > 0); 847 } 848 } 849} 850 851void pci_no_msi(void) 852{ 853 pci_msi_enable = 0; 854} 855 856EXPORT_SYMBOL(pci_enable_msi); 857EXPORT_SYMBOL(pci_disable_msi); 858EXPORT_SYMBOL(pci_enable_msix); 859EXPORT_SYMBOL(pci_disable_msix); 860