/* msi.c — snapshot of revision f2440d9acbe866b917b16cc0f927366341ce9215 */
1/*
2 * File:	msi.c
3 * Purpose:	PCI Message Signaled Interrupt (MSI)
4 *
5 * Copyright (C) 2003-2004 Intel
6 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
7 */
8
9#include <linux/err.h>
10#include <linux/mm.h>
11#include <linux/irq.h>
12#include <linux/interrupt.h>
13#include <linux/init.h>
14#include <linux/ioport.h>
15#include <linux/pci.h>
16#include <linux/proc_fs.h>
17#include <linux/msi.h>
18#include <linux/smp.h>
19
20#include <asm/errno.h>
21#include <asm/io.h>
22
23#include "pci.h"
24#include "msi.h"
25
/* Global MSI on/off switch; cleared by pci_no_msi() (see pci_msi_enabled()). */
static int pci_msi_enable = 1;
27
28/* Arch hooks */
29
#ifndef arch_msi_check_device
/*
 * Default (weak) arch hook: no architecture-specific restriction on
 * enabling MSI/MSI-X for @dev — always reports success.
 */
int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	return 0;
}
#endif
36
37#ifndef arch_setup_msi_irqs
38int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
39{
40	struct msi_desc *entry;
41	int ret;
42
43	list_for_each_entry(entry, &dev->msi_list, list) {
44		ret = arch_setup_msi_irq(dev, entry);
45		if (ret < 0)
46			return ret;
47		if (ret > 0)
48			return -ENOSPC;
49	}
50
51	return 0;
52}
53#endif
54
55#ifndef arch_teardown_msi_irqs
56void arch_teardown_msi_irqs(struct pci_dev *dev)
57{
58	struct msi_desc *entry;
59
60	list_for_each_entry(entry, &dev->msi_list, list) {
61		if (entry->irq != 0)
62			arch_teardown_msi_irq(entry->irq);
63	}
64}
65#endif
66
67static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
68{
69	u16 control;
70
71	if (pos) {
72		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
73		control &= ~PCI_MSI_FLAGS_ENABLE;
74		if (enable)
75			control |= PCI_MSI_FLAGS_ENABLE;
76		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
77	}
78}
79
80static void msi_set_enable(struct pci_dev *dev, int enable)
81{
82	__msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
83}
84
85static void msix_set_enable(struct pci_dev *dev, int enable)
86{
87	int pos;
88	u16 control;
89
90	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
91	if (pos) {
92		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
93		control &= ~PCI_MSIX_FLAGS_ENABLE;
94		if (enable)
95			control |= PCI_MSIX_FLAGS_ENABLE;
96		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
97	}
98}
99
100static inline __attribute_const__ u32 msi_mask(unsigned x)
101{
102	/* Don't shift by >= width of type */
103	if (x >= 5)
104		return 0xffffffff;
105	return (1 << (1 << x)) - 1;
106}
107
108static inline __attribute_const__ u32 msi_capable_mask(u16 control)
109{
110	return msi_mask((control >> 1) & 7);
111}
112
113static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
114{
115	return msi_mask((control >> 4) & 7);
116}
117
/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 *
 * Silently does nothing when the device has no per-vector Mask Bits
 * register (msi_attrib.maskbit clear).
 */
static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
	u32 mask_bits = desc->masked;

	if (!desc->msi_attrib.maskbit)
		return;

	/* Clear the bits selected by @mask, then set those given in @flag. */
	mask_bits &= ~mask;
	mask_bits |= flag;
	pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
	desc->masked = mask_bits;	/* cache last value written to hardware */
}
139
140/*
141 * This internal function does not flush PCI writes to the device.
142 * All users must ensure that they read from the device before either
143 * assuming that the device state is up to date, or returning out of this
144 * file.  This saves a few milliseconds when initialising devices with lots
145 * of MSI-X interrupts.
146 */
147static void msix_mask_irq(struct msi_desc *desc, u32 flag)
148{
149	u32 mask_bits = desc->masked;
150	unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
151					PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
152	mask_bits &= ~1;
153	mask_bits |= flag;
154	writel(mask_bits, desc->mask_base + offset);
155	desc->masked = mask_bits;
156}
157
158static void msi_set_mask_bit(unsigned irq, u32 flag)
159{
160	struct msi_desc *desc = get_irq_msi(irq);
161
162	if (desc->msi_attrib.is_msix) {
163		msix_mask_irq(desc, flag);
164		readl(desc->mask_base);		/* Flush write to device */
165	} else {
166		msi_mask_irq(desc, 1, flag);
167	}
168}
169
/* Mask (block delivery of) the MSI/MSI-X vector behind @irq. */
void mask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 1);
}
174
/* Unmask (re-allow delivery of) the MSI/MSI-X vector behind @irq. */
void unmask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 0);
}
179
/*
 * Read the currently-programmed message (address + data) for the vector
 * behind @desc into @msg.  For MSI-X the message lives in the
 * memory-mapped vector table; for plain MSI it lives in config space.
 */
void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);
	if (entry->msi_attrib.is_msix) {
		void __iomem *base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
	} else {
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			/* 64-bit capable: data register sits after upper address */
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
	}
}
208
/* Convenience wrapper: resolve @irq's descriptor and read its message. */
void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	read_msi_msg_desc(irq_to_desc(irq), msg);
}
215
/*
 * Program the message (address + data) for the vector behind @desc, then
 * cache it in entry->msg so the restore paths can replay it after
 * reset/resume (see __pci_restore_msi_state()/__pci_restore_msix_state()).
 */
void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);
	if (entry->msi_attrib.is_msix) {
		/* MSI-X: message lives in the memory-mapped vector table. */
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
	} else {
		/* MSI: message lives in the capability in config space. */
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
	}
	entry->msg = *msg;	/* shadow copy for state restore */
}
247
/* Convenience wrapper: resolve @irq's descriptor and write its message. */
void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	write_msi_msg_desc(irq_to_desc(irq), msg);
}
254
/* Forward declaration — definition follows its callers below. */
static int msi_free_irqs(struct pci_dev* dev);
256
257static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
258{
259	struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
260	if (!desc)
261		return NULL;
262
263	INIT_LIST_HEAD(&desc->list);
264	desc->dev = dev;
265
266	return desc;
267}
268
269static void pci_intx_for_msi(struct pci_dev *dev, int enable)
270{
271	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
272		pci_intx(dev, enable);
273}
274
/*
 * Re-enable MSI on @dev after reset/resume: replay the cached message,
 * restore the mask bits, and turn the capability's enable bit back on.
 */
static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = get_irq_msi(dev->irq);
	pos = entry->msi_attrib.pos;

	/* Keep INTx and MSI both off while the message is being rewritten. */
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 0);
	write_msi_msg(dev->irq, &entry->msg);

	/* Restore the cached per-vector mask state. */
	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
	/* Clear the multiple-message-enable field and re-enable MSI. */
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}
297
/*
 * Re-enable MSI-X on @dev after reset/resume: replay every vector's
 * cached message and mask state, then set the MSI-X enable bit.
 */
static void __pci_restore_msix_state(struct pci_dev *dev)
{
	int pos;
	struct msi_desc *entry;
	u16 control;

	if (!dev->msix_enabled)
		return;

	/* route the table */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 0);

	/* Rewrite message and mask for each vector in the table. */
	list_for_each_entry(entry, &dev->msi_list, list) {
		write_msi_msg(entry->irq, &entry->msg);
		msix_mask_irq(entry, entry->masked);
	}

	/* All descriptors share one capability position; take the first. */
	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
	control &= ~PCI_MSIX_FLAGS_MASKALL;
	control |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}
324
/* Re-program a device's MSI and MSI-X state, e.g. after reset or resume. */
void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);
331
/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of device function with a single
 * MSI irq, regardless of device function is capable of handling
 * multiple messages. A return of zero indicates the successful setup
 * of an entry zero with the new MSI irq or non-zero for otherwise.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, ret;
	u16 control;
	unsigned mask;

	msi_set_enable(dev, 0);	/* Ensure msi is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry(dev);
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.is_msix = 0;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;

	entry->mask_pos = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
	/* All MSIs are unmasked by default, Mask them all */
	if (entry->msi_attrib.maskbit)
		pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
	mask = msi_capable_mask(control);
	msi_mask_irq(entry, mask, mask);

	list_add_tail(&entry->list, &dev->msi_list);

	/* Configure MSI capability structure */
	ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI);
	if (ret) {
		/* Unlink and free the descriptor we just queued. */
		msi_free_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits	 */
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	/* Expose the freshly allocated vector as the device's irq. */
	dev->irq = entry->irq;
	return 0;
}
388
/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of device function with a
 * single MSI-X irq. A return of zero indicates the successful setup of
 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *entry;
	int pos, i, j, nr_entries, ret;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	/* The table-offset register encodes both the BAR index (BIR) and
	 * the offset of the vector table within that BAR. */
	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start (dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry(dev);
		if (!entry)
			break;	/* partial list; failure handled below */

		j = entries[i].entry;
		entry->msi_attrib.is_msix = 1;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->mask_base = base;
		/* Cache the vector-control dword, then mask this vector. */
		entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
					PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
		msix_mask_irq(entry, 1);

		list_add_tail(&entry->list, &dev->msi_list);
	}

	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret < 0) {
		/* If we had some success report the number of irqs
		 * we succeeded in setting up. */
		int avail = 0;
		list_for_each_entry(entry, &dev->msi_list, list) {
			if (entry->irq != 0) {
				avail++;
			}
		}

		if (avail != 0)
			ret = avail;
	}

	if (ret) {
		/* Either a hard error (< 0) or a partial count (> 0):
		 * tear everything down and report it to the caller. */
		msi_free_irqs(dev);
		return ret;
	}

	/* Publish the allocated vectors back into the caller's array. */
	i = 0;
	list_for_each_entry(entry, &dev->msi_list, list) {
		entries[i].vector = entry->irq;
		set_irq_msi(entry->irq, entry);
		i++;
	}
	/* Set MSI-X enabled bits */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 1);
	dev->msix_enabled = 1;

	return 0;
}
478
479/**
480 * pci_msi_check_device - check whether MSI may be enabled on a device
481 * @dev: pointer to the pci_dev data structure of MSI device function
482 * @nvec: how many MSIs have been requested ?
483 * @type: are we checking for MSI or MSI-X ?
484 *
485 * Look at global flags, the device itself, and its parent busses
486 * to determine if MSI/-X are supported for the device. If MSI/-X is
487 * supported return 0, else return an error code.
488 **/
489static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
490{
491	struct pci_bus *bus;
492	int ret;
493
494	/* MSI must be globally enabled and supported by the device */
495	if (!pci_msi_enable || !dev || dev->no_msi)
496		return -EINVAL;
497
498	/*
499	 * You can't ask to have 0 or less MSIs configured.
500	 *  a) it's stupid ..
501	 *  b) the list manipulation code assumes nvec >= 1.
502	 */
503	if (nvec < 1)
504		return -ERANGE;
505
506	/* Any bridge which does NOT route MSI transactions from it's
507	 * secondary bus to it's primary bus must set NO_MSI flag on
508	 * the secondary pci_bus.
509	 * We expect only arch-specific PCI host bus controller driver
510	 * or quirks for specific PCI bridges to be setting NO_MSI.
511	 */
512	for (bus = dev->bus; bus; bus = bus->parent)
513		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
514			return -EINVAL;
515
516	ret = arch_msi_check_device(dev, nvec, type);
517	if (ret)
518		return ret;
519
520	if (!pci_find_capability(dev, type))
521		return -EINVAL;
522
523	return 0;
524}
525
526/**
527 * pci_enable_msi - configure device's MSI capability structure
528 * @dev: pointer to the pci_dev data structure of MSI device function
529 *
530 * Setup the MSI capability structure of device function with
531 * a single MSI irq upon its software driver call to request for
532 * MSI mode enabled on its hardware device function. A return of zero
533 * indicates the successful setup of an entry zero with the new MSI
534 * irq or non-zero for otherwise.
535 **/
536int pci_enable_msi(struct pci_dev* dev)
537{
538	int status;
539
540	status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI);
541	if (status)
542		return status;
543
544	WARN_ON(!!dev->msi_enabled);
545
546	/* Check whether driver already requested for MSI-X irqs */
547	if (dev->msix_enabled) {
548		dev_info(&dev->dev, "can't enable MSI "
549			 "(MSI-X already enabled)\n");
550		return -EINVAL;
551	}
552	status = msi_capability_init(dev);
553	return status;
554}
555EXPORT_SYMBOL(pci_enable_msi);
556
/*
 * Disable MSI on @dev without freeing the descriptor list: clear the
 * enable bit, re-enable INTx, restore the mask register, and put
 * dev->irq back to its default pin-assertion irq.
 */
void pci_msi_shutdown(struct pci_dev *dev)
{
	struct msi_desc *desc;
	u32 mask;
	u16 ctrl;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	BUG_ON(list_empty(&dev->msi_list));
	desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
	pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, &ctrl);
	/* NOTE(review): flag == ~mask also sets bits outside the capable
	 * range of the mask register — confirm this is intended. */
	mask = msi_capable_mask(ctrl);
	msi_mask_irq(desc, mask, ~mask);

	/* Restore dev->irq to its default pin-assertion irq */
	dev->irq = desc->msi_attrib.default_irq;
}
579
580void pci_disable_msi(struct pci_dev* dev)
581{
582	struct msi_desc *entry;
583
584	if (!pci_msi_enable || !dev || !dev->msi_enabled)
585		return;
586
587	pci_msi_shutdown(dev);
588
589	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
590	if (entry->msi_attrib.is_msix)
591		return;
592
593	msi_free_irqs(dev);
594}
595EXPORT_SYMBOL(pci_disable_msi);
596
/*
 * Tear down and free every msi_desc on @dev's list.  For MSI-X entries
 * the per-vector mask bit is set in the hardware table first, and the
 * shared table mapping is unmapped when the last entry is reached.
 * Always returns 0.
 */
static int msi_free_irqs(struct pci_dev* dev)
{
	struct msi_desc *entry, *tmp;

	/* No vector may still have an interrupt handler attached. */
	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq)
			BUG_ON(irq_has_action(entry->irq));
	}

	arch_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
		if (entry->msi_attrib.is_msix) {
			/* Mask the vector in hardware before dropping it. */
			writel(1, entry->mask_base + entry->msi_attrib.entry_nr
				  * PCI_MSIX_ENTRY_SIZE
				  + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

			/* All entries share mask_base; unmap it only once. */
			if (list_is_last(&entry->list, &dev->msi_list))
				iounmap(entry->mask_base);
		}
		list_del(&entry->list);
		kfree(entry);
	}

	return 0;
}
623
624/**
625 * pci_msix_table_size - return the number of device's MSI-X table entries
626 * @dev: pointer to the pci_dev data structure of MSI-X device function
627 */
628int pci_msix_table_size(struct pci_dev *dev)
629{
630	int pos;
631	u16 control;
632
633	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
634	if (!pos)
635		return 0;
636
637	pci_read_config_word(dev, msi_control_reg(pos), &control);
638	return multi_msix_capable(control);
639}
640
641/**
642 * pci_enable_msix - configure device's MSI-X capability structure
643 * @dev: pointer to the pci_dev data structure of MSI-X device function
644 * @entries: pointer to an array of MSI-X entries
645 * @nvec: number of MSI-X irqs requested for allocation by device driver
646 *
647 * Setup the MSI-X capability structure of device function with the number
648 * of requested irqs upon its software driver call to request for
649 * MSI-X mode enabled on its hardware device function. A return of zero
650 * indicates the successful configuration of MSI-X capability structure
651 * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
652 * Or a return of > 0 indicates that driver request is exceeding the number
653 * of irqs available. Driver should use the returned value to re-send
654 * its request.
655 **/
656int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
657{
658	int status, nr_entries;
659	int i, j;
660
661	if (!entries)
662 		return -EINVAL;
663
664	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
665	if (status)
666		return status;
667
668	nr_entries = pci_msix_table_size(dev);
669	if (nvec > nr_entries)
670		return -EINVAL;
671
672	/* Check for any invalid entries */
673	for (i = 0; i < nvec; i++) {
674		if (entries[i].entry >= nr_entries)
675			return -EINVAL;		/* invalid entry */
676		for (j = i + 1; j < nvec; j++) {
677			if (entries[i].entry == entries[j].entry)
678				return -EINVAL;	/* duplicate entry */
679		}
680	}
681	WARN_ON(!!dev->msix_enabled);
682
683	/* Check whether driver already requested for MSI irq */
684   	if (dev->msi_enabled) {
685		dev_info(&dev->dev, "can't enable MSI-X "
686		       "(MSI IRQ already assigned)\n");
687		return -EINVAL;
688	}
689	status = msix_capability_init(dev, entries, nvec);
690	return status;
691}
692EXPORT_SYMBOL(pci_enable_msix);
693
/* Release every MSI-X vector on @dev; thin wrapper over msi_free_irqs(). */
static void msix_free_all_irqs(struct pci_dev *dev)
{
	msi_free_irqs(dev);
}
698
/*
 * Disable MSI-X on @dev without freeing the vectors: clear the enable
 * bit, re-enable INTx, and mark the function as MSI-X-off.
 */
void pci_msix_shutdown(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	msix_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;
}
708void pci_disable_msix(struct pci_dev* dev)
709{
710	if (!pci_msi_enable || !dev || !dev->msix_enabled)
711		return;
712
713	pci_msix_shutdown(dev);
714
715	msix_free_all_irqs(dev);
716}
717EXPORT_SYMBOL(pci_disable_msix);
718
/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Being called during hotplug remove, from which the device function
 * is hot-removed. All previous assigned MSI/MSI-X irqs, if
 * allocated for this device function, are reclaimed to unused state,
 * which may be used later on.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev)
		return;

	if (dev->msi_enabled)
		msi_free_irqs(dev);

	if (dev->msix_enabled)
		msix_free_all_irqs(dev);
}
739
/* Disable MSI support globally (see pci_msi_enabled(): "pci=nomsi"). */
void pci_no_msi(void)
{
	pci_msi_enable = 0;
}
744
/**
 * pci_msi_enabled - is MSI enabled?
 *
 * Returns true if MSI has not been disabled by the command-line option
 * pci=nomsi.
 **/
int pci_msi_enabled(void)
{
	return pci_msi_enable;
}
EXPORT_SYMBOL(pci_msi_enabled);
756
/* Initialise the list that will hold this device's MSI descriptors. */
void pci_msi_init_pci_dev(struct pci_dev *dev)
{
	INIT_LIST_HEAD(&dev->msi_list);
}
761