/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>

#include <asm/errno.h>
#include <asm/io.h>

#include "pci.h"
#include "msi.h"

static int pci_msi_enable = 1;

/* Arch hooks */

int __attribute__ ((weak))
arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	return 0;
}

int __attribute__ ((weak))
arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry)
{
	return 0;
}

int __attribute__ ((weak))
arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;

	list_for_each_entry(entry, &dev->msi_list, list) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret)
			return ret;
	}

	return 0;
}

void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq)
{
	return;
}

void __attribute__ ((weak))
arch_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq != 0)
			arch_teardown_msi_irq(entry->irq);
	}
}
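
/*
 * Architectures that cannot use the generic defaults above provide strong
 * definitions of these hooks.  A minimal sketch of an override, assuming a
 * hypothetical arch helper example_compose_msi_msg() and an arch irq_chip
 * example_msi_chip (real ports also pick a flow handler that matches their
 * interrupt controller):
 *
 *	int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry)
 *	{
 *		struct msi_msg msg;
 *		int irq = create_irq();
 *
 *		if (irq < 0)
 *			return irq;
 *		example_compose_msi_msg(dev, irq, &msg);
 *		set_irq_msi(irq, entry);
 *		write_msi_msg(irq, &msg);
 *		set_irq_chip_and_handler(irq, &example_msi_chip,
 *					 handle_edge_irq);
 *		return 0;
 *	}
 */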

static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
	u16 control;

	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
}

static void msi_set_enable(struct pci_dev *dev, int enable)
{
	__msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
}

static void msix_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

static void msix_flush_writes(unsigned int irq)
{
	struct msi_desc *entry;

	entry = get_irq_msi(irq);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		/* nothing to do */
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
}

/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 *
 * Returns 1 if it succeeded in masking the interrupt and 0 if the device
 * doesn't support MSI masking.
 */
static int msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
{
	struct msi_desc *entry;

	entry = get_irq_msi(irq);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			/* For MSI, mask_base holds the config space offset
			 * of the mask register (see msi_capability_init()). */
			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			mask_bits &= ~(mask);
			mask_bits |= flag & mask;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		} else {
			return 0;
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
	entry->msi_attrib.masked = !!flag;
	return 1;
}

void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);

	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}

void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);

	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
	entry->msg = *msg;
}

void mask_msi_irq(unsigned int irq)
{
	msi_set_mask_bits(irq, 1, 1);
	msix_flush_writes(irq);
}

void unmask_msi_irq(unsigned int irq)
{
	msi_set_mask_bits(irq, 1, 0);
	msix_flush_writes(irq);
}
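
/*
 * mask_msi_irq()/unmask_msi_irq() are normally not called directly by
 * drivers; architecture code wires them into the irq_chip used for MSI
 * vectors.  A minimal sketch (the ack callback and chip name are
 * hypothetical, arch-specific details):
 *
 *	static struct irq_chip example_msi_chip = {
 *		.name	= "EXAMPLE-MSI",
 *		.unmask	= unmask_msi_irq,
 *		.mask	= mask_msi_irq,
 *		.ack	= example_ack_msi_irq,
 *	};
 */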

static int msi_free_irqs(struct pci_dev *dev);

static struct msi_desc *alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
	if (!entry)
		return NULL;

	INIT_LIST_HEAD(&entry->list);
	entry->irq = 0;
	entry->dev = NULL;

	return entry;
}

static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
		pci_intx(dev, enable);
}

static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = get_irq_msi(dev->irq);
	pos = entry->msi_attrib.pos;

	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 0);
	write_msi_msg(dev->irq, &entry->msg);
	if (entry->msi_attrib.maskbit)
		msi_set_mask_bits(dev->irq, entry->msi_attrib.maskbits_mask,
				  entry->msi_attrib.masked);

	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}

static void __pci_restore_msix_state(struct pci_dev *dev)
{
	int pos;
	struct msi_desc *entry;
	u16 control;

	if (!dev->msix_enabled)
		return;

	/* route the table */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 0);

	list_for_each_entry(entry, &dev->msi_list, list) {
		write_msi_msg(entry->irq, &entry->msg);
		msi_set_mask_bits(entry->irq, 1, entry->msi_attrib.masked);
	}

	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
	control &= ~PCI_MSIX_FLAGS_MASKALL;
	control |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);
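
/*
 * pci_restore_msi_state() is meant for the PCI core's resume path and is
 * normally reached via pci_restore_state(); drivers rarely call it
 * directly.  A minimal sketch of a driver resume handler that relies on
 * it indirectly (example_hw_reinit() is a hypothetical device-specific
 * helper):
 *
 *	static int example_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return example_hw_reinit(pdev);
 *	}
 */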

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a single
 * MSI irq, regardless of whether the device function is capable of handling
 * multiple messages. A return of zero indicates successful setup of entry
 * zero with the new MSI irq; a non-zero return indicates failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, ret;
	u16 control;

	msi_set_enable(dev, 0);	/* Ensure msi is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.masked = 1;
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	if (is_mask_bit_support(control)) {
		/* Stash the config space offset of the mask register in
		 * mask_base; msi_set_mask_bits() casts it back. */
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	entry->dev = dev;
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default; mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		/* temp becomes a bitmask covering every vector the device
		 * is capable of, i.e. bits 0..(nvec - 1). */
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
		entry->msi_attrib.maskbits_mask = temp;
	}
	list_add_tail(&entry->list, &dev->msi_list);

	/* Configure MSI capability structure */
	ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI);
	if (ret) {
		msi_free_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits */
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	dev->irq = entry->irq;
	return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Set up the MSI-X capability structure of the device function with the
 * requested number of MSI-X irqs. A return of zero indicates successful
 * setup of the requested MSI-X entries with allocated irqs; a non-zero
 * return indicates failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *entry;
	int pos, i, j, nr_entries, ret;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	msix_set_enable(dev, 0);	/* Ensure msix is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start(dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;

		j = entries[i].entry;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.masked = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->dev = dev;
		entry->mask_base = base;

		list_add_tail(&entry->list, &dev->msi_list);
	}

	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret) {
		int avail = 0;
		list_for_each_entry(entry, &dev->msi_list, list) {
			if (entry->irq != 0)
				avail++;
		}

		msi_free_irqs(dev);

		/* If we had some success, report the number of irqs
		 * we succeeded in setting up.
		 */
		if (avail == 0)
			avail = ret;
		return avail;
	}

	i = 0;
	list_for_each_entry(entry, &dev->msi_list, list) {
		entries[i].vector = entry->irq;
		set_irq_msi(entry->irq, entry);
		i++;
	}
	/* Set MSI-X enabled bits */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 1);
	dev->msix_enabled = 1;

	return 0;
}

/**
 * pci_msi_check_device - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested?
 * @type: are we checking for MSI or MSI-X?
 *
 * Look at global flags, the device itself, and its parent busses
 * to determine if MSI/-X are supported for the device. If MSI/-X is
 * supported return 0, else return an error code.
 **/
static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	struct pci_bus *bus;
	int ret;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/*
	 * You can't ask to have 0 or less MSIs configured.
	 *  a) it's stupid ..
	 *  b) the list manipulation code assumes nvec >= 1.
	 */
	if (nvec < 1)
		return -ERANGE;

	/* Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set the NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller drivers
	 * or quirks for specific PCI bridges to be setting NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	ret = arch_msi_check_device(dev, nvec, type);
	if (ret)
		return ret;

	if (!pci_find_capability(dev, type))
		return -EINVAL;

	return 0;
}

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with
 * a single MSI irq when its driver requests MSI mode for the hardware
 * device function. A return of zero indicates successful setup of
 * entry zero with the new MSI irq; a non-zero return indicates failure.
 **/
int pci_enable_msi(struct pci_dev *dev)
{
	int status;

	status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI);
	if (status)
		return status;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether the driver already requested MSI-X irqs */
	if (dev->msix_enabled) {
		dev_info(&dev->dev, "can't enable MSI "
			 "(MSI-X already enabled)\n");
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	return status;
}
EXPORT_SYMBOL(pci_enable_msi);
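
/*
 * Typical driver usage, falling back to legacy INTx when MSI cannot be
 * enabled.  A minimal sketch; example_interrupt() and the "example" name
 * are hypothetical:
 *
 *	err = pci_enable_msi(pdev);
 *	if (err)
 *		dev_info(&pdev->dev, "MSI unavailable, using INTx\n");
 *	err = request_irq(pdev->irq, example_interrupt,
 *			  err ? IRQF_SHARED : 0, "example", pdev);
 *	if (err) {
 *		pci_disable_msi(pdev);
 *		return err;
 *	}
 */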

void pci_msi_shutdown(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	/* Return the device to its reset state with MSI irqs unmasked */
	if (entry->msi_attrib.maskbit) {
		u32 mask = entry->msi_attrib.maskbits_mask;
		msi_set_mask_bits(dev->irq, mask, ~mask);
	}
	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
		return;

	/* Restore dev->irq to its default pin-assertion irq */
	dev->irq = entry->msi_attrib.default_irq;
}

void pci_disable_msi(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	pci_msi_shutdown(dev);

	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
		return;

	msi_free_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msi);

static int msi_free_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry, *tmp;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq)
			BUG_ON(irq_has_action(entry->irq));
	}

	arch_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
		if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) {
			/* Mask the vector before freeing the entry */
			writel(1, entry->mask_base + entry->msi_attrib.entry_nr
				  * PCI_MSIX_ENTRY_SIZE
				  + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

			if (list_is_last(&entry->list, &dev->msi_list))
				iounmap(entry->mask_base);
		}
		list_del(&entry->list);
		kfree(entry);
	}

	return 0;
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Set up the MSI-X capability structure of the device function with the
 * number of requested irqs when its driver requests MSI-X mode for the
 * hardware device function. A return of zero indicates successful
 * configuration of the MSI-X capability structure with newly allocated
 * MSI-X irqs. A return of < 0 indicates a failure, while a return of > 0
 * indicates that the request exceeds the number of irqs available; the
 * driver should then retry with the returned value.
 **/
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries;
	int i, j;
	u16 control;

	if (!entries)
		return -EINVAL;

	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
	if (status)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether the driver already requested an MSI irq */
	if (dev->msi_enabled) {
		dev_info(&dev->dev, "can't enable MSI-X "
		       "(MSI IRQ already assigned)\n");
		return -EINVAL;
	}
	status = msix_capability_init(dev, entries, nvec);
	return status;
}
EXPORT_SYMBOL(pci_enable_msix);
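
/*
 * Typical driver usage, including the retry convention for a positive
 * return value.  A minimal sketch; the vector count, example_handler()
 * and the "example" name are hypothetical:
 *
 *	struct msix_entry entries[4];
 *	int i, err, nvec = 4;
 *
 *	for (i = 0; i < nvec; i++)
 *		entries[i].entry = i;
 *
 *	err = pci_enable_msix(pdev, entries, nvec);
 *	if (err > 0) {
 *		nvec = err;		(fewer vectors are available)
 *		err = pci_enable_msix(pdev, entries, nvec);
 *	}
 *	if (err)
 *		return err;		(fall back to MSI or INTx)
 *
 *	for (i = 0; i < nvec; i++)
 *		request_irq(entries[i].vector, example_handler, 0,
 *			    "example", pdev);
 */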

static void msix_free_all_irqs(struct pci_dev *dev)
{
	msi_free_irqs(dev);
}

void pci_msix_shutdown(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	msix_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;
}

void pci_disable_msix(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	pci_msix_shutdown(dev);

	msix_free_all_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug removal of the device function. Any MSI/MSI-X
 * irqs previously allocated for this device function are reclaimed to
 * an unused state so that they may be reused later.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev)
		return;

	if (dev->msi_enabled)
		msi_free_irqs(dev);

	if (dev->msix_enabled)
		msix_free_all_irqs(dev);
}

void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

void pci_msi_init_pci_dev(struct pci_dev *dev)
{
	INIT_LIST_HEAD(&dev->msi_list);
}