msi.c revision d52877c7b1afb8c37ebe17e2005040b79cb618b0
/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>

#include <asm/errno.h>
#include <asm/io.h>

#include "pci.h"
#include "msi.h"

static int pci_msi_enable = 1;

/* Arch hooks */

int __attribute__ ((weak))
arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	return 0;
}

int __attribute__ ((weak))
arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry)
{
	return 0;
}

int __attribute__ ((weak))
arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;

	list_for_each_entry(entry, &dev->msi_list, list) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret)
			return ret;
	}

	return 0;
}

void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq)
{
	return;
}

void __attribute__ ((weak))
arch_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq != 0)
			arch_teardown_msi_irq(entry->irq);
	}
}
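
/*
 * Illustrative sketch, not part of this revision: an architecture that
 * needs non-default behaviour overrides the weak hooks above with strong
 * definitions.  Assuming the architecture provides create_irq() and
 * set_irq_msi(), and using a hypothetical message-composition helper,
 * an override of arch_setup_msi_irq() might look roughly like this:
 *
 *	int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 *	{
 *		struct msi_msg msg;
 *		int irq = create_irq();		// allocate an unused irq
 *
 *		if (irq < 0)
 *			return irq;
 *		set_irq_msi(irq, desc);		// bind desc to the irq
 *		arch_compose_msi_msg(dev, irq, &msg);	// hypothetical helper
 *		write_msi_msg(irq, &msg);	// program the device
 *		return 0;
 *	}
 */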

static void msi_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
}

static void msix_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

static void msix_flush_writes(unsigned int irq)
{
	struct msi_desc *entry;

	entry = get_irq_msi(irq);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		/* nothing to do */
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
}

static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
{
	struct msi_desc *entry;

	entry = get_irq_msi(irq);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			mask_bits &= ~(mask);
			mask_bits |= flag & mask;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		} else {
			msi_set_enable(entry->dev, !flag);
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
	entry->msi_attrib.masked = !!flag;
}

void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}

void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
	entry->msg = *msg;
}

void mask_msi_irq(unsigned int irq)
{
	msi_set_mask_bits(irq, 1, 1);
	msix_flush_writes(irq);
}

void unmask_msi_irq(unsigned int irq)
{
	msi_set_mask_bits(irq, 1, 0);
	msix_flush_writes(irq);
}

static int msi_free_irqs(struct pci_dev *dev);

static struct msi_desc *alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
	if (!entry)
		return NULL;

	INIT_LIST_HEAD(&entry->list);
	entry->irq = 0;
	entry->dev = NULL;

	return entry;
}

static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
		pci_intx(dev, enable);
}

static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = get_irq_msi(dev->irq);
	pos = entry->msi_attrib.pos;

	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 0);
	write_msi_msg(dev->irq, &entry->msg);
	if (entry->msi_attrib.maskbit)
		msi_set_mask_bits(dev->irq, entry->msi_attrib.maskbits_mask,
				  entry->msi_attrib.masked);

	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	control &= ~(PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE);
	if (entry->msi_attrib.maskbit || !entry->msi_attrib.masked)
		control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}

static void __pci_restore_msix_state(struct pci_dev *dev)
{
	int pos;
	struct msi_desc *entry;
	u16 control;

	if (!dev->msix_enabled)
		return;

	/* route the table */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 0);

	list_for_each_entry(entry, &dev->msi_list, list) {
		write_msi_msg(entry->irq, &entry->msg);
		msi_set_mask_bits(entry->irq, 1, entry->msi_attrib.masked);
	}

	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
	control &= ~PCI_MSIX_FLAGS_MASKALL;
	control |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a single
 * MSI irq, regardless of whether the device function is capable of
 * handling multiple messages. A return of zero indicates successful setup
 * of entry zero with the new MSI irq; a non-zero return indicates failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, ret;
	u16 control;

	msi_set_enable(dev, 0);	/* Ensure msi is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.masked = 1;
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	if (is_mask_bit_support(control)) {
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	entry->dev = dev;
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default; mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
		entry->msi_attrib.maskbits_mask = temp;
	}
	list_add_tail(&entry->list, &dev->msi_list);

	/* Configure MSI capability structure */
	ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI);
	if (ret) {
		msi_free_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits */
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	dev->irq = entry->irq;
	return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of the device function with the
 * requested number of MSI-X irqs. A return of zero indicates successful
 * setup of the requested MSI-X entries with allocated irqs; a non-zero
 * return indicates failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *entry;
	int pos, i, j, nr_entries, ret;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	msix_set_enable(dev, 0);	/* Ensure msix is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start(dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;

		j = entries[i].entry;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.masked = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->dev = dev;
		entry->mask_base = base;

		list_add_tail(&entry->list, &dev->msi_list);
	}

	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret) {
		int avail = 0;
		list_for_each_entry(entry, &dev->msi_list, list) {
			if (entry->irq != 0)
				avail++;
		}

		msi_free_irqs(dev);

		/* If we had some success report the number of irqs
		 * we succeeded in setting up.
		 */
		if (avail == 0)
			avail = ret;
		return avail;
	}

	i = 0;
	list_for_each_entry(entry, &dev->msi_list, list) {
		entries[i].vector = entry->irq;
		set_irq_msi(entry->irq, entry);
		i++;
	}
	/* Set MSI-X enabled bits */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 1);
	dev->msix_enabled = 1;

	return 0;
}

/**
 * pci_msi_check_device - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested?
 * @type: are we checking for MSI or MSI-X?
 *
 * Look at global flags, the device itself, and its parent buses
 * to determine if MSI/-X are supported for the device. If MSI/-X is
 * supported return 0, else return an error code.
 **/
static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	struct pci_bus *bus;
	int ret;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/*
	 * You can't ask to have 0 or less MSIs configured.
	 *  a) it's stupid ..
	 *  b) the list manipulation code assumes nvec >= 1.
	 */
	if (nvec < 1)
		return -ERANGE;

	/* Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set the NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller drivers
	 * or quirks for specific PCI bridges to set NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	ret = arch_msi_check_device(dev, nvec, type);
	if (ret)
		return ret;

	if (!pci_find_capability(dev, type))
		return -EINVAL;

	return 0;
}

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a
 * single MSI irq when the device's driver requests MSI mode to be
 * enabled. A return of zero indicates successful setup of entry zero
 * with the new MSI irq; a non-zero return indicates failure. See the
 * usage sketch after this function for a typical driver call sequence.
 **/
int pci_enable_msi(struct pci_dev *dev)
{
	int status;

	status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI);
	if (status)
		return status;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether driver already requested MSI-X irqs */
	if (dev->msix_enabled) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI.  "
			"Device already has MSI-X enabled\n",
			pci_name(dev));
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	return status;
}
EXPORT_SYMBOL(pci_enable_msi);
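
/*
 * Illustrative usage sketch, not part of this file: a typical PCI driver
 * enables MSI from its probe routine and falls back to INTx when MSI is
 * unavailable.  The handler and cookie names below are hypothetical.
 *
 *	if (pci_enable_msi(pdev))
 *		dev_info(&pdev->dev, "MSI unavailable, using INTx\n");
 *	err = request_irq(pdev->irq, my_irq_handler, 0, "my_driver", my_dev);
 *
 * On the remove path the driver undoes this in reverse order:
 *
 *	free_irq(pdev->irq, my_dev);
 *	pci_disable_msi(pdev);
 */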

void pci_msi_shutdown(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	/* Return the device to its reset state with MSI irqs unmasked */
	if (entry->msi_attrib.maskbit) {
		u32 mask = entry->msi_attrib.maskbits_mask;
		msi_set_mask_bits(dev->irq, mask, ~mask);
	}
	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
		return;

	/* Restore dev->irq to its default pin-assertion irq */
	dev->irq = entry->msi_attrib.default_irq;
}

void pci_disable_msi(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	pci_msi_shutdown(dev);

	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
		return;

	msi_free_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msi);

static int msi_free_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry, *tmp;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq)
			BUG_ON(irq_has_action(entry->irq));
	}

	arch_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
		if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) {
			writel(1, entry->mask_base + entry->msi_attrib.entry_nr
				  * PCI_MSIX_ENTRY_SIZE
				  + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

			if (list_is_last(&entry->list, &dev->msi_list))
				iounmap(entry->mask_base);
		}
		list_del(&entry->list);
		kfree(entry);
	}

	return 0;
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of the device function with the
 * number of requested irqs when the device's driver requests MSI-X mode
 * to be enabled. A return of zero indicates successful configuration of
 * the MSI-X capability structure with newly allocated MSI-X irqs. A
 * return of < 0 indicates a failure. A return of > 0 indicates that the
 * driver requested more irqs than are available; the driver should use
 * the returned value to retry with a smaller request. See the usage
 * sketch after this function for a typical retry loop.
 **/
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries;
	int i, j;
	u16 control;

	if (!entries)
		return -EINVAL;

	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
	if (status)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether driver already requested an MSI irq */
	if (dev->msi_enabled) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X.  "
		       "Device already has an MSI irq assigned\n",
		       pci_name(dev));
		return -EINVAL;
	}
	status = msix_capability_init(dev, entries, nvec);
	return status;
}
EXPORT_SYMBOL(pci_enable_msix);
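
/*
 * Illustrative usage sketch, not part of this file: a driver fills in the
 * entry numbers it wants and, when pci_enable_msix() returns a positive
 * count, retries with that smaller number of vectors.  The vector count
 * and array below are hypothetical.
 *
 *	struct msix_entry entries[4];
 *	int i, err, nvec = 4;
 *
 *	for (i = 0; i < nvec; i++)
 *		entries[i].entry = i;
 *	do {
 *		err = pci_enable_msix(pdev, entries, nvec);
 *		if (err > 0)
 *			nvec = err;	// fewer irqs available, retry
 *	} while (err > 0);
 *	if (err)
 *		return err;	// or fall back to MSI/INTx instead
 *	// on success, entries[i].vector holds the irq for each entry
 */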

static void msix_free_all_irqs(struct pci_dev *dev)
{
	msi_free_irqs(dev);
}

void pci_msix_shutdown(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	msix_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;
}

void pci_disable_msix(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	pci_msix_shutdown(dev);

	msix_free_all_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug removal of the device function. All MSI/MSI-X
 * irqs previously allocated for this device function are reclaimed to
 * an unused state, so they may be used again later.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev)
		return;

	if (dev->msi_enabled)
		msi_free_irqs(dev);

	if (dev->msix_enabled)
		msix_free_all_irqs(dev);
}

void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

void pci_msi_init_pci_dev(struct pci_dev *dev)
{
	INIT_LIST_HEAD(&dev->msi_list);
}