/* msi.c — revision 41017f0cac925e4a6bcf3359b75e5538112d4216 */
1/* 2 * File: msi.c 3 * Purpose: PCI Message Signaled Interrupt (MSI) 4 * 5 * Copyright (C) 2003-2004 Intel 6 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) 7 */ 8 9#include <linux/mm.h> 10#include <linux/irq.h> 11#include <linux/interrupt.h> 12#include <linux/init.h> 13#include <linux/config.h> 14#include <linux/ioport.h> 15#include <linux/smp_lock.h> 16#include <linux/pci.h> 17#include <linux/proc_fs.h> 18 19#include <asm/errno.h> 20#include <asm/io.h> 21#include <asm/smp.h> 22 23#include "pci.h" 24#include "msi.h" 25 26#define MSI_TARGET_CPU first_cpu(cpu_online_map) 27 28static DEFINE_SPINLOCK(msi_lock); 29static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL }; 30static kmem_cache_t* msi_cachep; 31 32static int pci_msi_enable = 1; 33static int last_alloc_vector; 34static int nr_released_vectors; 35static int nr_reserved_vectors = NR_HP_RESERVED_VECTORS; 36static int nr_msix_devices; 37 38#ifndef CONFIG_X86_IO_APIC 39int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1}; 40u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 }; 41#endif 42 43static void msi_cache_ctor(void *p, kmem_cache_t *cache, unsigned long flags) 44{ 45 memset(p, 0, NR_IRQS * sizeof(struct msi_desc)); 46} 47 48static int msi_cache_init(void) 49{ 50 msi_cachep = kmem_cache_create("msi_cache", 51 NR_IRQS * sizeof(struct msi_desc), 52 0, SLAB_HWCACHE_ALIGN, msi_cache_ctor, NULL); 53 if (!msi_cachep) 54 return -ENOMEM; 55 56 return 0; 57} 58 59static void msi_set_mask_bit(unsigned int vector, int flag) 60{ 61 struct msi_desc *entry; 62 63 entry = (struct msi_desc *)msi_desc[vector]; 64 if (!entry || !entry->dev || !entry->mask_base) 65 return; 66 switch (entry->msi_attrib.type) { 67 case PCI_CAP_ID_MSI: 68 { 69 int pos; 70 u32 mask_bits; 71 72 pos = (long)entry->mask_base; 73 pci_read_config_dword(entry->dev, pos, &mask_bits); 74 mask_bits &= ~(1); 75 mask_bits |= flag; 76 pci_write_config_dword(entry->dev, pos, mask_bits); 77 break; 78 } 79 case 
PCI_CAP_ID_MSIX: 80 { 81 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + 82 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET; 83 writel(flag, entry->mask_base + offset); 84 break; 85 } 86 default: 87 break; 88 } 89} 90 91#ifdef CONFIG_SMP 92static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask) 93{ 94 struct msi_desc *entry; 95 struct msg_address address; 96 unsigned int irq = vector; 97 unsigned int dest_cpu = first_cpu(cpu_mask); 98 99 entry = (struct msi_desc *)msi_desc[vector]; 100 if (!entry || !entry->dev) 101 return; 102 103 switch (entry->msi_attrib.type) { 104 case PCI_CAP_ID_MSI: 105 { 106 int pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI); 107 108 if (!pos) 109 return; 110 111 pci_read_config_dword(entry->dev, msi_lower_address_reg(pos), 112 &address.lo_address.value); 113 address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK; 114 address.lo_address.value |= (cpu_physical_id(dest_cpu) << 115 MSI_TARGET_CPU_SHIFT); 116 entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu); 117 pci_write_config_dword(entry->dev, msi_lower_address_reg(pos), 118 address.lo_address.value); 119 set_native_irq_info(irq, cpu_mask); 120 break; 121 } 122 case PCI_CAP_ID_MSIX: 123 { 124 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + 125 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET; 126 127 address.lo_address.value = readl(entry->mask_base + offset); 128 address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK; 129 address.lo_address.value |= (cpu_physical_id(dest_cpu) << 130 MSI_TARGET_CPU_SHIFT); 131 entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu); 132 writel(address.lo_address.value, entry->mask_base + offset); 133 set_native_irq_info(irq, cpu_mask); 134 break; 135 } 136 default: 137 break; 138 } 139} 140#else 141#define set_msi_affinity NULL 142#endif /* CONFIG_SMP */ 143 144static void mask_MSI_irq(unsigned int vector) 145{ 146 msi_set_mask_bit(vector, 1); 147} 148 149static void unmask_MSI_irq(unsigned int vector) 150{ 151 
msi_set_mask_bit(vector, 0); 152} 153 154static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector) 155{ 156 struct msi_desc *entry; 157 unsigned long flags; 158 159 spin_lock_irqsave(&msi_lock, flags); 160 entry = msi_desc[vector]; 161 if (!entry || !entry->dev) { 162 spin_unlock_irqrestore(&msi_lock, flags); 163 return 0; 164 } 165 entry->msi_attrib.state = 1; /* Mark it active */ 166 spin_unlock_irqrestore(&msi_lock, flags); 167 168 return 0; /* never anything pending */ 169} 170 171static unsigned int startup_msi_irq_w_maskbit(unsigned int vector) 172{ 173 startup_msi_irq_wo_maskbit(vector); 174 unmask_MSI_irq(vector); 175 return 0; /* never anything pending */ 176} 177 178static void shutdown_msi_irq(unsigned int vector) 179{ 180 struct msi_desc *entry; 181 unsigned long flags; 182 183 spin_lock_irqsave(&msi_lock, flags); 184 entry = msi_desc[vector]; 185 if (entry && entry->dev) 186 entry->msi_attrib.state = 0; /* Mark it not active */ 187 spin_unlock_irqrestore(&msi_lock, flags); 188} 189 190static void end_msi_irq_wo_maskbit(unsigned int vector) 191{ 192 move_native_irq(vector); 193 ack_APIC_irq(); 194} 195 196static void end_msi_irq_w_maskbit(unsigned int vector) 197{ 198 move_native_irq(vector); 199 unmask_MSI_irq(vector); 200 ack_APIC_irq(); 201} 202 203static void do_nothing(unsigned int vector) 204{ 205} 206 207/* 208 * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices, 209 * which implement the MSI-X Capability Structure. 210 */ 211static struct hw_interrupt_type msix_irq_type = { 212 .typename = "PCI-MSI-X", 213 .startup = startup_msi_irq_w_maskbit, 214 .shutdown = shutdown_msi_irq, 215 .enable = unmask_MSI_irq, 216 .disable = mask_MSI_irq, 217 .ack = mask_MSI_irq, 218 .end = end_msi_irq_w_maskbit, 219 .set_affinity = set_msi_affinity 220}; 221 222/* 223 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices, 224 * which implement the MSI Capability Structure with 225 * Mask-and-Pending Bits. 
226 */ 227static struct hw_interrupt_type msi_irq_w_maskbit_type = { 228 .typename = "PCI-MSI", 229 .startup = startup_msi_irq_w_maskbit, 230 .shutdown = shutdown_msi_irq, 231 .enable = unmask_MSI_irq, 232 .disable = mask_MSI_irq, 233 .ack = mask_MSI_irq, 234 .end = end_msi_irq_w_maskbit, 235 .set_affinity = set_msi_affinity 236}; 237 238/* 239 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices, 240 * which implement the MSI Capability Structure without 241 * Mask-and-Pending Bits. 242 */ 243static struct hw_interrupt_type msi_irq_wo_maskbit_type = { 244 .typename = "PCI-MSI", 245 .startup = startup_msi_irq_wo_maskbit, 246 .shutdown = shutdown_msi_irq, 247 .enable = do_nothing, 248 .disable = do_nothing, 249 .ack = do_nothing, 250 .end = end_msi_irq_wo_maskbit, 251 .set_affinity = set_msi_affinity 252}; 253 254static void msi_data_init(struct msg_data *msi_data, 255 unsigned int vector) 256{ 257 memset(msi_data, 0, sizeof(struct msg_data)); 258 msi_data->vector = (u8)vector; 259 msi_data->delivery_mode = MSI_DELIVERY_MODE; 260 msi_data->level = MSI_LEVEL_MODE; 261 msi_data->trigger = MSI_TRIGGER_MODE; 262} 263 264static void msi_address_init(struct msg_address *msi_address) 265{ 266 unsigned int dest_id; 267 unsigned long dest_phys_id = cpu_physical_id(MSI_TARGET_CPU); 268 269 memset(msi_address, 0, sizeof(struct msg_address)); 270 msi_address->hi_address = (u32)0; 271 dest_id = (MSI_ADDRESS_HEADER << MSI_ADDRESS_HEADER_SHIFT); 272 msi_address->lo_address.u.dest_mode = MSI_PHYSICAL_MODE; 273 msi_address->lo_address.u.redirection_hint = MSI_REDIRECTION_HINT_MODE; 274 msi_address->lo_address.u.dest_id = dest_id; 275 msi_address->lo_address.value |= (dest_phys_id << MSI_TARGET_CPU_SHIFT); 276} 277 278static int msi_free_vector(struct pci_dev* dev, int vector, int reassign); 279static int assign_msi_vector(void) 280{ 281 static int new_vector_avail = 1; 282 int vector; 283 unsigned long flags; 284 285 /* 286 * msi_lock is provided to ensure that successful 
allocation of MSI 287 * vector is assigned unique among drivers. 288 */ 289 spin_lock_irqsave(&msi_lock, flags); 290 291 if (!new_vector_avail) { 292 int free_vector = 0; 293 294 /* 295 * vector_irq[] = -1 indicates that this specific vector is: 296 * - assigned for MSI (since MSI have no associated IRQ) or 297 * - assigned for legacy if less than 16, or 298 * - having no corresponding 1:1 vector-to-IOxAPIC IRQ mapping 299 * vector_irq[] = 0 indicates that this vector, previously 300 * assigned for MSI, is freed by hotplug removed operations. 301 * This vector will be reused for any subsequent hotplug added 302 * operations. 303 * vector_irq[] > 0 indicates that this vector is assigned for 304 * IOxAPIC IRQs. This vector and its value provides a 1-to-1 305 * vector-to-IOxAPIC IRQ mapping. 306 */ 307 for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) { 308 if (vector_irq[vector] != 0) 309 continue; 310 free_vector = vector; 311 if (!msi_desc[vector]) 312 break; 313 else 314 continue; 315 } 316 if (!free_vector) { 317 spin_unlock_irqrestore(&msi_lock, flags); 318 return -EBUSY; 319 } 320 vector_irq[free_vector] = -1; 321 nr_released_vectors--; 322 spin_unlock_irqrestore(&msi_lock, flags); 323 if (msi_desc[free_vector] != NULL) { 324 struct pci_dev *dev; 325 int tail; 326 327 /* free all linked vectors before re-assign */ 328 do { 329 spin_lock_irqsave(&msi_lock, flags); 330 dev = msi_desc[free_vector]->dev; 331 tail = msi_desc[free_vector]->link.tail; 332 spin_unlock_irqrestore(&msi_lock, flags); 333 msi_free_vector(dev, tail, 1); 334 } while (free_vector != tail); 335 } 336 337 return free_vector; 338 } 339 vector = assign_irq_vector(AUTO_ASSIGN); 340 last_alloc_vector = vector; 341 if (vector == LAST_DEVICE_VECTOR) 342 new_vector_avail = 0; 343 344 spin_unlock_irqrestore(&msi_lock, flags); 345 return vector; 346} 347 348static int get_new_vector(void) 349{ 350 int vector = assign_msi_vector(); 351 352 if (vector > 0) 353 set_intr_gate(vector, 
interrupt[vector]); 354 355 return vector; 356} 357 358static int msi_init(void) 359{ 360 static int status = -ENOMEM; 361 362 if (!status) 363 return status; 364 365 if (pci_msi_quirk) { 366 pci_msi_enable = 0; 367 printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n"); 368 status = -EINVAL; 369 return status; 370 } 371 372 status = msi_cache_init(); 373 if (status < 0) { 374 pci_msi_enable = 0; 375 printk(KERN_WARNING "PCI: MSI cache init failed\n"); 376 return status; 377 } 378 last_alloc_vector = assign_irq_vector(AUTO_ASSIGN); 379 if (last_alloc_vector < 0) { 380 pci_msi_enable = 0; 381 printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n"); 382 status = -EBUSY; 383 return status; 384 } 385 vector_irq[last_alloc_vector] = 0; 386 nr_released_vectors++; 387 388 return status; 389} 390 391static int get_msi_vector(struct pci_dev *dev) 392{ 393 return get_new_vector(); 394} 395 396static struct msi_desc* alloc_msi_entry(void) 397{ 398 struct msi_desc *entry; 399 400 entry = kmem_cache_alloc(msi_cachep, SLAB_KERNEL); 401 if (!entry) 402 return NULL; 403 404 memset(entry, 0, sizeof(struct msi_desc)); 405 entry->link.tail = entry->link.head = 0; /* single message */ 406 entry->dev = NULL; 407 408 return entry; 409} 410 411static void attach_msi_entry(struct msi_desc *entry, int vector) 412{ 413 unsigned long flags; 414 415 spin_lock_irqsave(&msi_lock, flags); 416 msi_desc[vector] = entry; 417 spin_unlock_irqrestore(&msi_lock, flags); 418} 419 420static void irq_handler_init(int cap_id, int pos, int mask) 421{ 422 unsigned long flags; 423 424 spin_lock_irqsave(&irq_desc[pos].lock, flags); 425 if (cap_id == PCI_CAP_ID_MSIX) 426 irq_desc[pos].handler = &msix_irq_type; 427 else { 428 if (!mask) 429 irq_desc[pos].handler = &msi_irq_wo_maskbit_type; 430 else 431 irq_desc[pos].handler = &msi_irq_w_maskbit_type; 432 } 433 spin_unlock_irqrestore(&irq_desc[pos].lock, flags); 434} 435 436static void enable_msi_mode(struct pci_dev *dev, int pos, int type) 
437{ 438 u16 control; 439 440 pci_read_config_word(dev, msi_control_reg(pos), &control); 441 if (type == PCI_CAP_ID_MSI) { 442 /* Set enabled bits to single MSI & enable MSI_enable bit */ 443 msi_enable(control, 1); 444 pci_write_config_word(dev, msi_control_reg(pos), control); 445 } else { 446 msix_enable(control); 447 pci_write_config_word(dev, msi_control_reg(pos), control); 448 } 449 if (pci_find_capability(dev, PCI_CAP_ID_EXP)) { 450 /* PCI Express Endpoint device detected */ 451 pci_intx(dev, 0); /* disable intx */ 452 } 453} 454 455void disable_msi_mode(struct pci_dev *dev, int pos, int type) 456{ 457 u16 control; 458 459 pci_read_config_word(dev, msi_control_reg(pos), &control); 460 if (type == PCI_CAP_ID_MSI) { 461 /* Set enabled bits to single MSI & enable MSI_enable bit */ 462 msi_disable(control); 463 pci_write_config_word(dev, msi_control_reg(pos), control); 464 } else { 465 msix_disable(control); 466 pci_write_config_word(dev, msi_control_reg(pos), control); 467 } 468 if (pci_find_capability(dev, PCI_CAP_ID_EXP)) { 469 /* PCI Express Endpoint device detected */ 470 pci_intx(dev, 1); /* enable intx */ 471 } 472} 473 474static int msi_lookup_vector(struct pci_dev *dev, int type) 475{ 476 int vector; 477 unsigned long flags; 478 479 spin_lock_irqsave(&msi_lock, flags); 480 for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) { 481 if (!msi_desc[vector] || msi_desc[vector]->dev != dev || 482 msi_desc[vector]->msi_attrib.type != type || 483 msi_desc[vector]->msi_attrib.default_vector != dev->irq) 484 continue; 485 spin_unlock_irqrestore(&msi_lock, flags); 486 /* This pre-assigned MSI vector for this device 487 already exits. 
Override dev->irq with this vector */ 488 dev->irq = vector; 489 return 0; 490 } 491 spin_unlock_irqrestore(&msi_lock, flags); 492 493 return -EACCES; 494} 495 496void pci_scan_msi_device(struct pci_dev *dev) 497{ 498 if (!dev) 499 return; 500 501 if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0) 502 nr_msix_devices++; 503 else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0) 504 nr_reserved_vectors++; 505} 506 507#ifdef CONFIG_PM 508int pci_save_msi_state(struct pci_dev *dev) 509{ 510 int pos, i = 0; 511 u16 control; 512 struct pci_cap_saved_state *save_state; 513 u32 *cap; 514 515 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 516 if (pos <= 0 || dev->no_msi) 517 return 0; 518 519 pci_read_config_word(dev, msi_control_reg(pos), &control); 520 if (!(control & PCI_MSI_FLAGS_ENABLE)) 521 return 0; 522 523 save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5, 524 GFP_KERNEL); 525 if (!save_state) { 526 printk(KERN_ERR "Out of memory in pci_save_msi_state\n"); 527 return -ENOMEM; 528 } 529 cap = &save_state->data[0]; 530 531 pci_read_config_dword(dev, pos, &cap[i++]); 532 control = cap[0] >> 16; 533 pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]); 534 if (control & PCI_MSI_FLAGS_64BIT) { 535 pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]); 536 pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]); 537 } else 538 pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]); 539 if (control & PCI_MSI_FLAGS_MASKBIT) 540 pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]); 541 disable_msi_mode(dev, pos, PCI_CAP_ID_MSI); 542 save_state->cap_nr = PCI_CAP_ID_MSI; 543 pci_add_saved_cap(dev, save_state); 544 return 0; 545} 546 547void pci_restore_msi_state(struct pci_dev *dev) 548{ 549 int i = 0, pos; 550 u16 control; 551 struct pci_cap_saved_state *save_state; 552 u32 *cap; 553 554 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI); 555 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 556 if 
(!save_state || pos <= 0) 557 return; 558 cap = &save_state->data[0]; 559 560 control = cap[i++] >> 16; 561 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]); 562 if (control & PCI_MSI_FLAGS_64BIT) { 563 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]); 564 pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]); 565 } else 566 pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]); 567 if (control & PCI_MSI_FLAGS_MASKBIT) 568 pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]); 569 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); 570 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); 571 pci_remove_saved_cap(save_state); 572 kfree(save_state); 573} 574 575int pci_save_msix_state(struct pci_dev *dev) 576{ 577 int pos; 578 u16 control; 579 struct pci_cap_saved_state *save_state; 580 581 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 582 if (pos <= 0 || dev->no_msi) 583 return 0; 584 585 pci_read_config_word(dev, msi_control_reg(pos), &control); 586 if (!(control & PCI_MSIX_FLAGS_ENABLE)) 587 return 0; 588 save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16), 589 GFP_KERNEL); 590 if (!save_state) { 591 printk(KERN_ERR "Out of memory in pci_save_msix_state\n"); 592 return -ENOMEM; 593 } 594 *((u16 *)&save_state->data[0]) = control; 595 596 disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); 597 save_state->cap_nr = PCI_CAP_ID_MSIX; 598 pci_add_saved_cap(dev, save_state); 599 return 0; 600} 601 602void pci_restore_msix_state(struct pci_dev *dev) 603{ 604 u16 save; 605 int pos; 606 int vector, head, tail = 0; 607 void __iomem *base; 608 int j; 609 struct msg_address address; 610 struct msg_data data; 611 struct msi_desc *entry; 612 int temp; 613 struct pci_cap_saved_state *save_state; 614 615 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX); 616 if (!save_state) 617 return; 618 save = *((u16 *)&save_state->data[0]); 619 pci_remove_saved_cap(save_state); 620 kfree(save_state); 621 622 pos = 
pci_find_capability(dev, PCI_CAP_ID_MSIX); 623 if (pos <= 0) 624 return; 625 626 /* route the table */ 627 temp = dev->irq; 628 if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) 629 return; 630 vector = head = dev->irq; 631 while (head != tail) { 632 entry = msi_desc[vector]; 633 base = entry->mask_base; 634 j = entry->msi_attrib.entry_nr; 635 636 msi_address_init(&address); 637 msi_data_init(&data, vector); 638 639 address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK; 640 address.lo_address.value |= entry->msi_attrib.current_cpu << 641 MSI_TARGET_CPU_SHIFT; 642 643 writel(address.lo_address.value, 644 base + j * PCI_MSIX_ENTRY_SIZE + 645 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); 646 writel(address.hi_address, 647 base + j * PCI_MSIX_ENTRY_SIZE + 648 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); 649 writel(*(u32*)&data, 650 base + j * PCI_MSIX_ENTRY_SIZE + 651 PCI_MSIX_ENTRY_DATA_OFFSET); 652 653 tail = msi_desc[vector]->link.tail; 654 vector = tail; 655 } 656 dev->irq = temp; 657 658 pci_write_config_word(dev, msi_control_reg(pos), save); 659 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); 660} 661#endif 662 663static void msi_register_init(struct pci_dev *dev, struct msi_desc *entry) 664{ 665 struct msg_address address; 666 struct msg_data data; 667 int pos, vector = dev->irq; 668 u16 control; 669 670 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 671 pci_read_config_word(dev, msi_control_reg(pos), &control); 672 /* Configure MSI capability structure */ 673 msi_address_init(&address); 674 msi_data_init(&data, vector); 675 entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >> 676 MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK); 677 pci_write_config_dword(dev, msi_lower_address_reg(pos), 678 address.lo_address.value); 679 if (is_64bit_address(control)) { 680 pci_write_config_dword(dev, 681 msi_upper_address_reg(pos), address.hi_address); 682 pci_write_config_word(dev, 683 msi_data_reg(pos, 1), *((u32*)&data)); 684 } else 685 pci_write_config_word(dev, 686 msi_data_reg(pos, 0), 
*((u32*)&data)); 687 if (entry->msi_attrib.maskbit) { 688 unsigned int maskbits, temp; 689 /* All MSIs are unmasked by default, Mask them all */ 690 pci_read_config_dword(dev, 691 msi_mask_bits_reg(pos, is_64bit_address(control)), 692 &maskbits); 693 temp = (1 << multi_msi_capable(control)); 694 temp = ((temp - 1) & ~temp); 695 maskbits |= temp; 696 pci_write_config_dword(dev, 697 msi_mask_bits_reg(pos, is_64bit_address(control)), 698 maskbits); 699 } 700} 701 702/** 703 * msi_capability_init - configure device's MSI capability structure 704 * @dev: pointer to the pci_dev data structure of MSI device function 705 * 706 * Setup the MSI capability structure of device function with a single 707 * MSI vector, regardless of device function is capable of handling 708 * multiple messages. A return of zero indicates the successful setup 709 * of an entry zero with the new MSI vector or non-zero for otherwise. 710 **/ 711static int msi_capability_init(struct pci_dev *dev) 712{ 713 struct msi_desc *entry; 714 int pos, vector; 715 u16 control; 716 717 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 718 pci_read_config_word(dev, msi_control_reg(pos), &control); 719 /* MSI Entry Initialization */ 720 entry = alloc_msi_entry(); 721 if (!entry) 722 return -ENOMEM; 723 724 vector = get_msi_vector(dev); 725 if (vector < 0) { 726 kmem_cache_free(msi_cachep, entry); 727 return -EBUSY; 728 } 729 entry->link.head = vector; 730 entry->link.tail = vector; 731 entry->msi_attrib.type = PCI_CAP_ID_MSI; 732 entry->msi_attrib.state = 0; /* Mark it not active */ 733 entry->msi_attrib.entry_nr = 0; 734 entry->msi_attrib.maskbit = is_mask_bit_support(control); 735 entry->msi_attrib.default_vector = dev->irq; /* Save IOAPIC IRQ */ 736 dev->irq = vector; 737 entry->dev = dev; 738 if (is_mask_bit_support(control)) { 739 entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos, 740 is_64bit_address(control)); 741 } 742 /* Replace with MSI handler */ 743 irq_handler_init(PCI_CAP_ID_MSI, vector, 
entry->msi_attrib.maskbit); 744 /* Configure MSI capability structure */ 745 msi_register_init(dev, entry); 746 747 attach_msi_entry(entry, vector); 748 /* Set MSI enabled bits */ 749 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); 750 751 return 0; 752} 753 754/** 755 * msix_capability_init - configure device's MSI-X capability 756 * @dev: pointer to the pci_dev data structure of MSI-X device function 757 * @entries: pointer to an array of struct msix_entry entries 758 * @nvec: number of @entries 759 * 760 * Setup the MSI-X capability structure of device function with a 761 * single MSI-X vector. A return of zero indicates the successful setup of 762 * requested MSI-X entries with allocated vectors or non-zero for otherwise. 763 **/ 764static int msix_capability_init(struct pci_dev *dev, 765 struct msix_entry *entries, int nvec) 766{ 767 struct msi_desc *head = NULL, *tail = NULL, *entry = NULL; 768 struct msg_address address; 769 struct msg_data data; 770 int vector, pos, i, j, nr_entries, temp = 0; 771 unsigned long phys_addr; 772 u32 table_offset; 773 u16 control; 774 u8 bir; 775 void __iomem *base; 776 777 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 778 /* Request & Map MSI-X table region */ 779 pci_read_config_word(dev, msi_control_reg(pos), &control); 780 nr_entries = multi_msix_capable(control); 781 782 pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); 783 bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); 784 table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; 785 phys_addr = pci_resource_start (dev, bir) + table_offset; 786 base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); 787 if (base == NULL) 788 return -ENOMEM; 789 790 /* MSI-X Table Initialization */ 791 for (i = 0; i < nvec; i++) { 792 entry = alloc_msi_entry(); 793 if (!entry) 794 break; 795 vector = get_msi_vector(dev); 796 if (vector < 0) 797 break; 798 799 j = entries[i].entry; 800 entries[i].vector = vector; 801 entry->msi_attrib.type = PCI_CAP_ID_MSIX; 802 
entry->msi_attrib.state = 0; /* Mark it not active */ 803 entry->msi_attrib.entry_nr = j; 804 entry->msi_attrib.maskbit = 1; 805 entry->msi_attrib.default_vector = dev->irq; 806 entry->dev = dev; 807 entry->mask_base = base; 808 if (!head) { 809 entry->link.head = vector; 810 entry->link.tail = vector; 811 head = entry; 812 } else { 813 entry->link.head = temp; 814 entry->link.tail = tail->link.tail; 815 tail->link.tail = vector; 816 head->link.head = vector; 817 } 818 temp = vector; 819 tail = entry; 820 /* Replace with MSI-X handler */ 821 irq_handler_init(PCI_CAP_ID_MSIX, vector, 1); 822 /* Configure MSI-X capability structure */ 823 msi_address_init(&address); 824 msi_data_init(&data, vector); 825 entry->msi_attrib.current_cpu = 826 ((address.lo_address.u.dest_id >> 827 MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK); 828 writel(address.lo_address.value, 829 base + j * PCI_MSIX_ENTRY_SIZE + 830 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); 831 writel(address.hi_address, 832 base + j * PCI_MSIX_ENTRY_SIZE + 833 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); 834 writel(*(u32*)&data, 835 base + j * PCI_MSIX_ENTRY_SIZE + 836 PCI_MSIX_ENTRY_DATA_OFFSET); 837 attach_msi_entry(entry, vector); 838 } 839 if (i != nvec) { 840 i--; 841 for (; i >= 0; i--) { 842 vector = (entries + i)->vector; 843 msi_free_vector(dev, vector, 0); 844 (entries + i)->vector = 0; 845 } 846 return -EBUSY; 847 } 848 /* Set MSI-X enabled bits */ 849 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); 850 851 return 0; 852} 853 854/** 855 * pci_enable_msi - configure device's MSI capability structure 856 * @dev: pointer to the pci_dev data structure of MSI device function 857 * 858 * Setup the MSI capability structure of device function with 859 * a single MSI vector upon its software driver call to request for 860 * MSI mode enabled on its hardware device function. A return of zero 861 * indicates the successful setup of an entry zero with the new MSI 862 * vector or non-zero for otherwise. 
863 **/ 864int pci_enable_msi(struct pci_dev* dev) 865{ 866 int pos, temp, status = -EINVAL; 867 u16 control; 868 869 if (!pci_msi_enable || !dev) 870 return status; 871 872 if (dev->no_msi) 873 return status; 874 875 if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) 876 return -EINVAL; 877 878 temp = dev->irq; 879 880 status = msi_init(); 881 if (status < 0) 882 return status; 883 884 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 885 if (!pos) 886 return -EINVAL; 887 888 pci_read_config_word(dev, msi_control_reg(pos), &control); 889 if (control & PCI_MSI_FLAGS_ENABLE) 890 return 0; /* Already in MSI mode */ 891 892 if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) { 893 /* Lookup Sucess */ 894 unsigned long flags; 895 896 spin_lock_irqsave(&msi_lock, flags); 897 if (!vector_irq[dev->irq]) { 898 msi_desc[dev->irq]->msi_attrib.state = 0; 899 vector_irq[dev->irq] = -1; 900 nr_released_vectors--; 901 spin_unlock_irqrestore(&msi_lock, flags); 902 msi_register_init(dev, msi_desc[dev->irq]); 903 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); 904 return 0; 905 } 906 spin_unlock_irqrestore(&msi_lock, flags); 907 dev->irq = temp; 908 } 909 /* Check whether driver already requested for MSI-X vectors */ 910 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 911 if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) { 912 printk(KERN_INFO "PCI: %s: Can't enable MSI. 
" 913 "Device already has MSI-X vectors assigned\n", 914 pci_name(dev)); 915 dev->irq = temp; 916 return -EINVAL; 917 } 918 status = msi_capability_init(dev); 919 if (!status) { 920 if (!pos) 921 nr_reserved_vectors--; /* Only MSI capable */ 922 else if (nr_msix_devices > 0) 923 nr_msix_devices--; /* Both MSI and MSI-X capable, 924 but choose enabling MSI */ 925 } 926 927 return status; 928} 929 930void pci_disable_msi(struct pci_dev* dev) 931{ 932 struct msi_desc *entry; 933 int pos, default_vector; 934 u16 control; 935 unsigned long flags; 936 937 if (!pci_msi_enable) 938 return; 939 if (!dev) 940 return; 941 942 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 943 if (!pos) 944 return; 945 946 pci_read_config_word(dev, msi_control_reg(pos), &control); 947 if (!(control & PCI_MSI_FLAGS_ENABLE)) 948 return; 949 950 spin_lock_irqsave(&msi_lock, flags); 951 entry = msi_desc[dev->irq]; 952 if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) { 953 spin_unlock_irqrestore(&msi_lock, flags); 954 return; 955 } 956 if (entry->msi_attrib.state) { 957 spin_unlock_irqrestore(&msi_lock, flags); 958 printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without " 959 "free_irq() on MSI vector %d\n", 960 pci_name(dev), dev->irq); 961 BUG_ON(entry->msi_attrib.state > 0); 962 } else { 963 vector_irq[dev->irq] = 0; /* free it */ 964 nr_released_vectors++; 965 default_vector = entry->msi_attrib.default_vector; 966 spin_unlock_irqrestore(&msi_lock, flags); 967 /* Restore dev->irq to its default pin-assertion vector */ 968 dev->irq = default_vector; 969 disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), 970 PCI_CAP_ID_MSI); 971 } 972} 973 974static int msi_free_vector(struct pci_dev* dev, int vector, int reassign) 975{ 976 struct msi_desc *entry; 977 int head, entry_nr, type; 978 void __iomem *base; 979 unsigned long flags; 980 981 spin_lock_irqsave(&msi_lock, flags); 982 entry = msi_desc[vector]; 983 if (!entry || entry->dev != dev) { 984 
spin_unlock_irqrestore(&msi_lock, flags); 985 return -EINVAL; 986 } 987 type = entry->msi_attrib.type; 988 entry_nr = entry->msi_attrib.entry_nr; 989 head = entry->link.head; 990 base = entry->mask_base; 991 msi_desc[entry->link.head]->link.tail = entry->link.tail; 992 msi_desc[entry->link.tail]->link.head = entry->link.head; 993 entry->dev = NULL; 994 if (!reassign) { 995 vector_irq[vector] = 0; 996 nr_released_vectors++; 997 } 998 msi_desc[vector] = NULL; 999 spin_unlock_irqrestore(&msi_lock, flags); 1000 1001 kmem_cache_free(msi_cachep, entry); 1002 1003 if (type == PCI_CAP_ID_MSIX) { 1004 if (!reassign) 1005 writel(1, base + 1006 entry_nr * PCI_MSIX_ENTRY_SIZE + 1007 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); 1008 1009 if (head == vector) { 1010 /* 1011 * Detect last MSI-X vector to be released. 1012 * Release the MSI-X memory-mapped table. 1013 */ 1014#if 0 1015 int pos, nr_entries; 1016 unsigned long phys_addr; 1017 u32 table_offset; 1018 u16 control; 1019 u8 bir; 1020 1021 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 1022 pci_read_config_word(dev, msi_control_reg(pos), 1023 &control); 1024 nr_entries = multi_msix_capable(control); 1025 pci_read_config_dword(dev, msix_table_offset_reg(pos), 1026 &table_offset); 1027 bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); 1028 table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; 1029 phys_addr = pci_resource_start(dev, bir) + table_offset; 1030/* 1031 * FIXME! and what did you want to do with phys_addr? 
1032 */ 1033#endif 1034 iounmap(base); 1035 } 1036 } 1037 1038 return 0; 1039} 1040 1041static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec) 1042{ 1043 int vector = head, tail = 0; 1044 int i, j = 0, nr_entries = 0; 1045 void __iomem *base; 1046 unsigned long flags; 1047 1048 spin_lock_irqsave(&msi_lock, flags); 1049 while (head != tail) { 1050 nr_entries++; 1051 tail = msi_desc[vector]->link.tail; 1052 if (entries[0].entry == msi_desc[vector]->msi_attrib.entry_nr) 1053 j = vector; 1054 vector = tail; 1055 } 1056 if (*nvec > nr_entries) { 1057 spin_unlock_irqrestore(&msi_lock, flags); 1058 *nvec = nr_entries; 1059 return -EINVAL; 1060 } 1061 vector = ((j > 0) ? j : head); 1062 for (i = 0; i < *nvec; i++) { 1063 j = msi_desc[vector]->msi_attrib.entry_nr; 1064 msi_desc[vector]->msi_attrib.state = 0; /* Mark it not active */ 1065 vector_irq[vector] = -1; /* Mark it busy */ 1066 nr_released_vectors--; 1067 entries[i].vector = vector; 1068 if (j != (entries + i)->entry) { 1069 base = msi_desc[vector]->mask_base; 1070 msi_desc[vector]->msi_attrib.entry_nr = 1071 (entries + i)->entry; 1072 writel( readl(base + j * PCI_MSIX_ENTRY_SIZE + 1073 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET), base + 1074 (entries + i)->entry * PCI_MSIX_ENTRY_SIZE + 1075 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); 1076 writel( readl(base + j * PCI_MSIX_ENTRY_SIZE + 1077 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET), base + 1078 (entries + i)->entry * PCI_MSIX_ENTRY_SIZE + 1079 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); 1080 writel( (readl(base + j * PCI_MSIX_ENTRY_SIZE + 1081 PCI_MSIX_ENTRY_DATA_OFFSET) & 0xff00) | vector, 1082 base + (entries+i)->entry*PCI_MSIX_ENTRY_SIZE + 1083 PCI_MSIX_ENTRY_DATA_OFFSET); 1084 } 1085 vector = msi_desc[vector]->link.tail; 1086 } 1087 spin_unlock_irqrestore(&msi_lock, flags); 1088 1089 return 0; 1090} 1091 1092/** 1093 * pci_enable_msix - configure device's MSI-X capability structure 1094 * @dev: pointer to the pci_dev data structure of MSI-X device function 1095 * @entries: 
pointer to an array of MSI-X entries
 * @nvec: number of MSI-X vectors requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of device function with the number
 * of requested vectors upon its software driver call to request for
 * MSI-X mode enabled on its hardware device function. A return of zero
 * indicates the successful configuration of MSI-X capability structure
 * with new allocated MSI-X vectors. A return of < 0 indicates a failure.
 * Or a return of > 0 indicates that driver request is exceeding the number
 * of vectors available. Driver should use the returned value to re-send
 * its request.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries, free_vectors;
	int i, j, temp;
	u16 control;
	unsigned long flags;

	if (!pci_msi_enable || !dev || !entries)
 		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
 		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (control & PCI_MSIX_FLAGS_ENABLE)
		return -EINVAL;			/* Already in MSI-X mode */

	/* Hardware advertises how many table entries it implements. */
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	/*
	 * msi_lookup_vector() appears to overwrite dev->irq with the
	 * device's assigned MSI-X head vector on success; remember the
	 * original (IOAPIC) IRQ so it can be restored afterwards.
	 */
	temp = dev->irq;
	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		/* Lookup succeeded: vectors were assigned previously. */
		nr_entries = nvec;
		/* Reroute MSI-X table */
		if (reroute_msix_table(dev->irq, entries, &nr_entries)) {
			/* #requested > #previous-assigned: report the
			   available count back to the driver (> 0 return). */
			dev->irq = temp;
			return nr_entries;
		}
		dev->irq = temp;
		enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
		return 0;
	}
	/* Check whether driver already requested for MSI vector */
   	if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
		       "Device already has an MSI vector assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}

	spin_lock_irqsave(&msi_lock, flags);
	/*
	 * msi_lock is provided to ensure that enough vectors resources are
	 * available before granting.
	 */
	free_vectors = pci_vector_resources(last_alloc_vector,
				nr_released_vectors);
	/* Ensure that each MSI/MSI-X device has one vector reserved by
	   default to avoid any MSI-X driver to take all available
 	   resources */
	free_vectors -= nr_reserved_vectors;
	/* Find the average of free vectors among MSI-X devices */
	if (nr_msix_devices > 0)
		free_vectors /= nr_msix_devices;
	spin_unlock_irqrestore(&msi_lock, flags);

	if (nvec > free_vectors) {
		/* Not enough to satisfy the request: a positive return is
		   the number the driver may retry with; -EBUSY means none. */
		if (free_vectors > 0)
			return free_vectors;
		else
			return -EBUSY;
	}

	status = msix_capability_init(dev, entries, nvec);
	if (!status && nr_msix_devices > 0)
		nr_msix_devices--;	/* one fewer device sharing the pool */

	return status;
}

/**
 * pci_disable_msix - turn off the MSI-X capability of a device function
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 *
 * Walks the device's vector list releasing vectors that have no handler
 * installed. If any vector is still in use (free_irq() not called), a
 * warning is printed and the kernel BUGs; otherwise MSI-X mode is
 * disabled on the device.
 **/
void pci_disable_msix(struct pci_dev* dev)
{
	int pos, temp;
	u16 control;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return;		/* MSI-X not enabled; nothing to do */

	temp = dev->irq;	/* save the IOAPIC IRQ for restoration */
	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int state, vector, head, tail = 0, warning = 0;
		unsigned long flags;

		vector = head = dev->irq;
		spin_lock_irqsave(&msi_lock, flags);
		while
(head != tail) {
			/* A vector whose handler is still installed cannot
			   be released; note it and keep walking. */
			state = msi_desc[vector]->msi_attrib.state;
			if (state)
				warning = 1;
			else {
				vector_irq[vector] = 0; /* free it */
				nr_released_vectors++;
			}
			tail = msi_desc[vector]->link.tail;
			vector = tail;
		}
		spin_unlock_irqrestore(&msi_lock, flags);
		if (warning) {
			dev->irq = temp;
			printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
			       "free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			/* NOTE(review): warning == 1 here, so this BUG_ON
			   always fires; the printk above is the last message
			   before the kernel halts. */
			BUG_ON(warning > 0);
		} else {
			dev->irq = temp;
			disable_msi_mode(dev,
				pci_find_capability(dev, PCI_CAP_ID_MSIX),
				PCI_CAP_ID_MSIX);

		}
	}
}

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Being called during hotplug remove, from which the device function
 * is hot-removed. All previous assigned MSI/MSI-X vectors, if
 * allocated for this device function, are reclaimed to unused state,
 * which may be used later on.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	int state, pos, temp;
	unsigned long flags;

	if (!pci_msi_enable || !dev)
 		return;

	temp = dev->irq;		/* Save IOAPIC IRQ */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		/* Device still holds a plain MSI vector. */
		spin_lock_irqsave(&msi_lock, flags);
		state = msi_desc[dev->irq]->msi_attrib.state;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (state) {
			/* Handler still installed: warn, then BUG (the
			   BUG_ON below always fires when state != 0). */
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on MSI vector %d\n",
			       pci_name(dev), dev->irq);
			BUG_ON(state > 0);
		} else /* Release MSI vector assigned to this device */
			msi_free_vector(dev, dev->irq, 0);
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int vector, head, tail = 0, warning = 0;
		void __iomem *base = NULL;

		/* Walk the device's circular vector list, freeing every
		   vector except the head (freed after the loop). The lock
		   is taken per iteration because msi_free_vector() is
		   called outside it. */
		vector = head = dev->irq;
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			state = msi_desc[vector]->msi_attrib.state;
			tail = msi_desc[vector]->link.tail;
			base = msi_desc[vector]->mask_base;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (state)
				warning = 1;
			else if (vector != head) /* Release MSI-X vector */
				msi_free_vector(dev, vector, 0);
			vector = tail;
		}
		/* vector has wrapped back to head here.
		   NOTE(review): the head vector is freed unconditionally,
		   even when some vectors were found still active — confirm
		   this is intended (the warning path BUGs anyway). */
		msi_free_vector(dev, vector, 0);
		if (warning) {
			/* Force to release the MSI-X memory-mapped table */
#if 0
			unsigned long phys_addr;
			u32 table_offset;
			u16 control;
			u8 bir;

			pci_read_config_word(dev, msi_control_reg(pos),
				&control);
			pci_read_config_dword(dev, msix_table_offset_reg(pos),
				&table_offset);
			bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
			table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
			phys_addr = pci_resource_start(dev, bir) + table_offset;
/*
 * FIXME! and what did you want to do with phys_addr?
 */
#endif
			iounmap(base);
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			/* NOTE(review): always fires when warning is set. */
			BUG_ON(warning > 0);
		}
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
}

/* Globally disable MSI support (e.g. from a "pci=nomsi" boot option);
   every public entry point above checks pci_msi_enable first. */
void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);