msi.c revision 5ca5c02f0e81c094c19d30dc0d13be4e929a994a
/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>

#include <asm/errno.h>
#include <asm/io.h>

#include "pci.h"
#include "msi.h"

static int pci_msi_enable = 1;

/* Arch hooks */

int __attribute__ ((weak))
arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	return 0;
}

int __attribute__ ((weak))
arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry)
{
	return 0;
}

int __attribute__ ((weak))
arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;

	list_for_each_entry(entry, &dev->msi_list, list) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret)
			return ret;
	}

	return 0;
}

void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq)
{
	return;
}

void __attribute__ ((weak))
arch_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq != 0)
			arch_teardown_msi_irq(entry->irq);
	}
}

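/*
 * Illustrative sketch only (not built, hence the #if 0): roughly how an
 * architecture might override the weak arch_setup_msi_irq() hook above.
 * The irq allocator, the address/data encoding and the irq_chip used here
 * (foo_allocate_irq, FOO_MSI_ADDR, FOO_MSI_DATA, foo_msi_chip) are
 * hypothetical placeholders; real arch code derives them from its
 * interrupt controller.
 */
#if 0
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	struct msi_msg msg;
	int irq;

	irq = foo_allocate_irq();		/* hypothetical irq allocator */
	if (irq < 0)
		return irq;

	/* Tell the device where and what to write to raise this irq */
	msg.address_hi = 0;
	msg.address_lo = FOO_MSI_ADDR(irq);	/* hypothetical encoding */
	msg.data = FOO_MSI_DATA(irq);		/* hypothetical encoding */

	set_irq_msi(irq, desc);			/* bind irq <-> msi_desc */
	write_msi_msg(irq, &msg);		/* program the device */
	set_irq_chip_and_handler_name(irq, &foo_msi_chip, handle_edge_irq,
				      "edge");
	return 0;
}
#endif
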
static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
	u16 control;

	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
}

static void msi_set_enable(struct pci_dev *dev, int enable)
{
	__msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
}

static void msix_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

static void msix_flush_writes(unsigned int irq)
{
	struct msi_desc *entry;

	entry = get_irq_msi(irq);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		/* nothing to do */
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
}

static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
{
	struct msi_desc *entry;

	entry = get_irq_msi(irq);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			/* For MSI, mask_base holds the config space offset
			 * of the mask bits register (set up in
			 * msi_capability_init), not an ioremapped address. */
			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			mask_bits &= ~(mask);
			mask_bits |= flag & mask;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		} else {
			__msi_set_enable(entry->dev, entry->msi_attrib.pos,
					 !flag);
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
	entry->msi_attrib.masked = !!flag;
}

void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}

void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
	entry->msg = *msg;
}

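/*
 * Illustrative sketch only (not built): read_msi_msg()/write_msi_msg() are
 * typically used by arch code to retarget an already-programmed vector,
 * e.g. from a set_affinity handler. The exact layout of address_lo/data is
 * architecture specific; FOO_MSI_DEST_MASK and FOO_MSI_DEST() below are
 * hypothetical placeholders for that encoding.
 */
#if 0
static void foo_msi_set_destination(unsigned int irq, unsigned int dest)
{
	struct msi_msg msg;

	read_msi_msg(irq, &msg);		/* fetch current address/data */
	msg.address_lo &= ~FOO_MSI_DEST_MASK;	/* hypothetical mask */
	msg.address_lo |= FOO_MSI_DEST(dest);	/* hypothetical encoding */
	write_msi_msg(irq, &msg);		/* reprogram the device */
}
#endif
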
void mask_msi_irq(unsigned int irq)
{
	msi_set_mask_bits(irq, 1, 1);
	msix_flush_writes(irq);
}

void unmask_msi_irq(unsigned int irq)
{
	msi_set_mask_bits(irq, 1, 0);
	msix_flush_writes(irq);
}

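/*
 * Illustrative sketch only (not built): mask_msi_irq()/unmask_msi_irq() are
 * intended to be wired into an architecture's MSI irq_chip, roughly as
 * below. foo_msi_ack and foo_msi_set_affinity are hypothetical arch
 * callbacks.
 */
#if 0
static struct irq_chip foo_msi_chip = {
	.name		= "FOO-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= foo_msi_ack,
	.set_affinity	= foo_msi_set_affinity,
};
#endif
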
static int msi_free_irqs(struct pci_dev* dev);

static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
	if (!entry)
		return NULL;

	INIT_LIST_HEAD(&entry->list);
	entry->irq = 0;
	entry->dev = NULL;

	return entry;
}

static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
		pci_intx(dev, enable);
}

281
282static void __pci_restore_msi_state(struct pci_dev *dev)
283{
284	int pos;
285	u16 control;
286	struct msi_desc *entry;
287
288	if (!dev->msi_enabled)
289		return;
290
291	entry = get_irq_msi(dev->irq);
292	pos = entry->msi_attrib.pos;
293
294	pci_intx_for_msi(dev, 0);
295	msi_set_enable(dev, 0);
296	write_msi_msg(dev->irq, &entry->msg);
297	if (entry->msi_attrib.maskbit)
298		msi_set_mask_bits(dev->irq, entry->msi_attrib.maskbits_mask,
299				  entry->msi_attrib.masked);
300
301	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
302	control &= ~(PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE);
303	if (entry->msi_attrib.maskbit || !entry->msi_attrib.masked)
304		control |= PCI_MSI_FLAGS_ENABLE;
305	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
306}
307
308static void __pci_restore_msix_state(struct pci_dev *dev)
309{
310	int pos;
311	struct msi_desc *entry;
312	u16 control;
313
314	if (!dev->msix_enabled)
315		return;
316
317	/* route the table */
318	pci_intx_for_msi(dev, 0);
319	msix_set_enable(dev, 0);
320
321	list_for_each_entry(entry, &dev->msi_list, list) {
322		write_msi_msg(entry->irq, &entry->msg);
323		msi_set_mask_bits(entry->irq, 1, entry->msi_attrib.masked);
324	}
325
326	BUG_ON(list_empty(&dev->msi_list));
327	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
328	pos = entry->msi_attrib.pos;
329	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
330	control &= ~PCI_MSIX_FLAGS_MASKALL;
331	control |= PCI_MSIX_FLAGS_ENABLE;
332	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
333}
334
335void pci_restore_msi_state(struct pci_dev *dev)
336{
337	__pci_restore_msi_state(dev);
338	__pci_restore_msix_state(dev);
339}
340EXPORT_SYMBOL_GPL(pci_restore_msi_state);
341
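/*
 * Illustrative sketch only (not built): pci_restore_msi_state() is meant for
 * the PCI core's config-space restore path; a driver normally just calls
 * pci_restore_state() on resume and the MSI/MSI-X state is restored along
 * with it. foo_resume below is a hypothetical driver resume callback.
 */
#if 0
static int foo_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);	/* restores MSI/MSI-X state as well */
	return 0;
}
#endif
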
/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a single
 * MSI irq, regardless of whether the device function is capable of handling
 * multiple messages. A return of zero indicates successful setup of entry
 * zero with the new MSI irq; a non-zero return indicates failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, ret;
	u16 control;

	msi_set_enable(dev, 0);	/* Ensure msi is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.masked = 1;
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	if (is_mask_bit_support(control)) {
		/* For MSI, mask_base stores the config space offset of the
		 * mask bits register rather than an ioremapped address. */
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	entry->dev = dev;
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default; mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		/* Build a mask with one bit set per supported vector */
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
		entry->msi_attrib.maskbits_mask = temp;
	}
	list_add_tail(&entry->list, &dev->msi_list);

	/* Configure MSI capability structure */
	ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI);
	if (ret) {
		msi_free_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits */
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	dev->irq = entry->irq;
	return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Set up the MSI-X capability structure of the device function with the
 * requested number of MSI-X irqs. A return of zero indicates successful
 * setup of the requested MSI-X entries with allocated irqs; a non-zero
 * return indicates failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *entry;
	int pos, i, j, nr_entries, ret;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	msix_set_enable(dev, 0); /* Ensure msix is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start(dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;

		j = entries[i].entry;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.masked = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->dev = dev;
		entry->mask_base = base;

		list_add_tail(&entry->list, &dev->msi_list);
	}

	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret) {
		int avail = 0;
		list_for_each_entry(entry, &dev->msi_list, list) {
			if (entry->irq != 0)
				avail++;
		}

		msi_free_irqs(dev);

		/* If we had some success report the number of irqs
		 * we succeeded in setting up.
		 */
		if (avail == 0)
			avail = ret;
		return avail;
	}

	i = 0;
	list_for_each_entry(entry, &dev->msi_list, list) {
		entries[i].vector = entry->irq;
		set_irq_msi(entry->irq, entry);
		i++;
	}
	/* Set MSI-X enabled bits */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 1);
	dev->msix_enabled = 1;

	return 0;
}

/**
 * pci_msi_check_device - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested?
 * @type: are we checking for MSI or MSI-X?
 *
 * Look at global flags, the device itself, and its parent buses
 * to determine if MSI/MSI-X are supported for the device. If MSI/MSI-X is
 * supported, return 0, else return an error code.
 **/
static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
{
	struct pci_bus *bus;
	int ret;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/*
	 * You can't ask to have 0 or fewer MSIs configured:
	 *  a) it's stupid;
	 *  b) the list manipulation code assumes nvec >= 1.
	 */
	if (nvec < 1)
		return -ERANGE;

	/* Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set the NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller drivers
	 * or quirks for specific PCI bridges to set NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	ret = arch_msi_check_device(dev, nvec, type);
	if (ret)
		return ret;

	if (!pci_find_capability(dev, type))
		return -EINVAL;

	return 0;
}

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a
 * single MSI irq when the device driver requests MSI mode for its
 * hardware device function. A return of zero indicates successful setup
 * of entry zero with the new MSI irq, and dev->irq is updated to the new
 * irq; a non-zero return indicates failure.
 **/
int pci_enable_msi(struct pci_dev* dev)
{
	int status;

	status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI);
	if (status)
		return status;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether driver already requested MSI-X irqs */
	if (dev->msix_enabled) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI.  "
			"Device already has MSI-X enabled\n",
			pci_name(dev));
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	return status;
}
EXPORT_SYMBOL(pci_enable_msi);

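/*
 * Illustrative sketch only (not built): typical driver usage of
 * pci_enable_msi(). On success dev->irq holds the new MSI irq, which is then
 * passed to request_irq(); on failure the driver falls back to the legacy
 * INTx irq. foo_interrupt and the "foo" name are hypothetical.
 */
#if 0
static int foo_setup_irq(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_msi(pdev);
	if (err)
		dev_info(&pdev->dev, "MSI unavailable, using INTx\n");

	/* dev->irq is the MSI irq on success, the legacy irq otherwise */
	err = request_irq(pdev->irq, foo_interrupt, err ? IRQF_SHARED : 0,
			  "foo", pdev);
	if (err && pdev->msi_enabled)
		pci_disable_msi(pdev);
	return err;
}
#endif
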
void pci_msi_shutdown(struct pci_dev* dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	/* Return the device to its reset state with MSI irqs unmasked */
	if (entry->msi_attrib.maskbit) {
		u32 mask = entry->msi_attrib.maskbits_mask;
		msi_set_mask_bits(dev->irq, mask, ~mask);
	}
	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
		return;

	/* Restore dev->irq to its default pin-assertion irq */
	dev->irq = entry->msi_attrib.default_irq;
}

void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	pci_msi_shutdown(dev);

	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
		return;

	msi_free_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msi);

static int msi_free_irqs(struct pci_dev* dev)
{
	struct msi_desc *entry, *tmp;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq)
			BUG_ON(irq_has_action(entry->irq));
	}

	arch_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
		if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) {
			writel(1, entry->mask_base + entry->msi_attrib.entry_nr
				  * PCI_MSIX_ENTRY_SIZE
				  + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

			if (list_is_last(&entry->list, &dev->msi_list))
				iounmap(entry->mask_base);
		}
		list_del(&entry->list);
		kfree(entry);
	}

	return 0;
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Set up the MSI-X capability structure of the device function with the
 * number of requested irqs when the device driver requests MSI-X mode
 * for its hardware device function. A return of zero indicates successful
 * configuration of the MSI-X capability structure with newly allocated
 * MSI-X irqs. A return of < 0 indicates a failure. A return of > 0
 * indicates that the driver requested more irqs than are available; the
 * driver should retry its request with the returned number of irqs.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries;
	int i, j;
	u16 control;

	if (!entries)
		return -EINVAL;

	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
	if (status)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether driver already requested an MSI irq */
	if (dev->msi_enabled) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X.  "
		       "Device already has an MSI irq assigned\n",
		       pci_name(dev));
		return -EINVAL;
	}
	status = msix_capability_init(dev, entries, nvec);
	return status;
}
EXPORT_SYMBOL(pci_enable_msix);

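/*
 * Illustrative sketch only (not built): typical driver usage of
 * pci_enable_msix(). The driver fills in the entry numbers it wants; on a
 * positive return (fewer irqs available than requested) it may retry with
 * the smaller count. struct foo_dev, foo->msix_entries and FOO_NR_VECTORS
 * are hypothetical.
 */
#if 0
static int foo_enable_msix(struct foo_dev *foo, struct pci_dev *pdev)
{
	int i, nvec = FOO_NR_VECTORS;
	int err;

	for (i = 0; i < nvec; i++)
		foo->msix_entries[i].entry = i;

	for (;;) {
		err = pci_enable_msix(pdev, foo->msix_entries, nvec);
		if (err == 0)
			return nvec;	/* .vector fields now hold the irqs */
		if (err < 0)
			return err;	/* MSI-X not usable */
		nvec = err;		/* only this many available, retry */
	}
}
#endif
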
static void msix_free_all_irqs(struct pci_dev *dev)
{
	msi_free_irqs(dev);
}

void pci_msix_shutdown(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	msix_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;
}

void pci_disable_msix(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	pci_msix_shutdown(dev);

	msix_free_all_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug removal, when the device function is being
 * hot-removed. All MSI/MSI-X irqs previously allocated for this device
 * function are reclaimed to the unused state so they may be reused later.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev)
		return;

	if (dev->msi_enabled)
		msi_free_irqs(dev);

	if (dev->msix_enabled)
		msix_free_all_irqs(dev);
}

void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

void pci_msi_init_pci_dev(struct pci_dev *dev)
{
	INIT_LIST_HEAD(&dev->msi_list);
}