msi.c revision b5fbf53324f65646154e172af350674d5a2a1629
/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>

#include <asm/errno.h>
#include <asm/io.h>

#include "pci.h"
#include "msi.h"

static int pci_msi_enable = 1;

/* Arch hooks */

#ifndef arch_msi_check_device
int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	return 0;
}
#endif

#ifndef arch_setup_msi_irqs
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;

	list_for_each_entry(entry, &dev->msi_list, list) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret < 0)
			return ret;
		/* Any positive return is treated as "no free vectors" */
		if (ret > 0)
			return -ENOSPC;
	}

	return 0;
}
#endif

#ifndef arch_teardown_msi_irqs
void arch_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq != 0)
			arch_teardown_msi_irq(entry->irq);
	}
}
#endif

static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
	u16 control;

	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
}

static void msi_set_enable(struct pci_dev *dev, int enable)
{
	__msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
}

static void msix_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

static inline __attribute_const__ u32 msi_mask(unsigned x)
{
	/* Don't shift by >= width of type */
	if (x >= 5)
		return 0xffffffff;
	return (1 << (1 << x)) - 1;
}
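
/*
 * For example, msi_mask() maps the log2-encoded Multiple Message field to
 * a bitmask covering that many vectors:
 *	msi_mask(0) == 0x00000001	(1 vector)
 *	msi_mask(1) == 0x00000003	(2 vectors)
 *	msi_mask(3) == 0x000000ff	(8 vectors)
 *	msi_mask(5) == 0xffffffff	(32 vectors; capped to avoid an
 *					 undefined 32-bit shift)
 */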

static void msix_flush_writes(struct irq_desc *desc)
{
	struct msi_desc *entry;

	entry = get_irq_desc_msi(desc);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		/* nothing to do */
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
}

/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 *
 * Returns 1 if it succeeded in masking the interrupt and 0 if the device
 * doesn't support MSI masking.
 */
static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag)
{
	struct msi_desc *entry;

	entry = get_irq_desc_msi(desc);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			/*
			 * For MSI, mask_base holds the config space offset
			 * of the mask register, not an ioremapped address
			 * (see msi_capability_init()).
			 */
			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			mask_bits &= ~(mask);
			mask_bits |= flag & mask;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		} else {
			return 0;
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
	entry->msi_attrib.masked = !!flag;
	return 1;
}

void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}

void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct irq_desc *desc = irq_to_desc(irq);

	read_msi_msg_desc(desc, msg);
}

void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
	entry->msg = *msg;
}

void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct irq_desc *desc = irq_to_desc(irq);

	write_msi_msg_desc(desc, msg);
}

void mask_msi_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	msi_set_mask_bits(desc, 1, 1);
	msix_flush_writes(desc);
}

void unmask_msi_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	msi_set_mask_bits(desc, 1, 0);
	msix_flush_writes(desc);
}
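
/*
 * mask_msi_irq() and unmask_msi_irq() are typically wired up by the
 * architecture as the mask/unmask methods of its MSI irq_chip; they are
 * not usually called by drivers directly.
 */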

static int msi_free_irqs(struct pci_dev *dev);

static struct msi_desc *alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
	if (!entry)
		return NULL;

	INIT_LIST_HEAD(&entry->list);
	entry->irq = 0;
	entry->dev = NULL;

	return entry;
}

static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
		pci_intx(dev, enable);
}

static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = get_irq_msi(dev->irq);
	pos = entry->msi_attrib.pos;

	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 0);
	write_msi_msg(dev->irq, &entry->msg);
	if (entry->msi_attrib.maskbit) {
		struct irq_desc *desc = irq_to_desc(dev->irq);
		msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask,
				  entry->msi_attrib.masked);
	}

	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}

static void __pci_restore_msix_state(struct pci_dev *dev)
{
	int pos;
	struct msi_desc *entry;
	u16 control;

	if (!dev->msix_enabled)
		return;

	/* route the table */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 0);

	list_for_each_entry(entry, &dev->msi_list, list) {
		struct irq_desc *desc = irq_to_desc(entry->irq);
		write_msi_msg(entry->irq, &entry->msg);
		msi_set_mask_bits(desc, 1, entry->msi_attrib.masked);
	}

	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
	control &= ~PCI_MSIX_FLAGS_MASKALL;
	control |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);
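
/*
 * pci_restore_msi_state() re-programs a device's MSI/MSI-X message, mask
 * and enable bits from the saved msi_desc state, e.g. after the device has
 * been reset or has lost power across suspend/resume; it is normally
 * reached via the PCI core's state-restore path rather than called by
 * drivers directly.
 */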

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a single
 * MSI irq, regardless of whether the device function is capable of
 * handling multiple messages. A return of zero indicates the successful
 * setup of entry zero with the new MSI irq; a non-zero return indicates
 * failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, ret;
	u16 control;

	msi_set_enable(dev, 0);	/* Ensure MSI is disabled while we set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.masked = 1;
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	entry->dev = dev;
	if (entry->msi_attrib.maskbit) {
		unsigned int base, maskbits, temp;

		/*
		 * For MSI, mask_base stores the config space offset of the
		 * mask register (see msi_set_mask_bits()).
		 */
		base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
		entry->mask_base = (void __iomem *)(long)base;

		/* All MSIs are unmasked by default; mask them all */
		pci_read_config_dword(dev, base, &maskbits);
		temp = msi_mask((control & PCI_MSI_FLAGS_QMASK) >> 1);
		maskbits |= temp;
		pci_write_config_dword(dev, base, maskbits);
		entry->msi_attrib.maskbits_mask = temp;
	}
	list_add_tail(&entry->list, &dev->msi_list);

	/* Configure MSI capability structure */
	ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI);
	if (ret) {
		msi_free_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits */
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	dev->irq = entry->irq;
	return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of the device function with the
 * requested number of MSI-X irqs. A return of zero indicates the
 * successful setup of the requested MSI-X entries with allocated irqs;
 * a non-zero return indicates failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *entry;
	int pos, i, j, nr_entries, ret;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	msix_set_enable(dev, 0); /* Ensure MSI-X is disabled while we set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start(dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;

		j = entries[i].entry;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.masked = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->dev = dev;
		entry->mask_base = base;

		list_add_tail(&entry->list, &dev->msi_list);
	}

	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret < 0) {
		/* If we had some success, report the number of irqs
		 * we succeeded in setting up. */
		int avail = 0;
		list_for_each_entry(entry, &dev->msi_list, list) {
			if (entry->irq != 0)
				avail++;
		}

		if (avail != 0)
			ret = avail;
	}

	if (ret) {
		msi_free_irqs(dev);
		return ret;
	}

	i = 0;
	list_for_each_entry(entry, &dev->msi_list, list) {
		entries[i].vector = entry->irq;
		set_irq_msi(entry->irq, entry);
		i++;
	}
	/* Set MSI-X enabled bits */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 1);
	dev->msix_enabled = 1;

	return 0;
}

/**
 * pci_msi_check_device - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested?
 * @type: are we checking for MSI or MSI-X?
 *
 * Look at global flags, the device itself, and its parent busses
 * to determine if MSI/MSI-X are supported for the device. If MSI/MSI-X is
 * supported return 0, else return an error code.
 **/
static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	struct pci_bus *bus;
	int ret;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/*
	 * You can't ask to have 0 or fewer MSIs configured:
	 *  a) it makes no sense;
	 *  b) the list manipulation code assumes nvec >= 1.
	 */
	if (nvec < 1)
		return -ERANGE;

	/* Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set the NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller drivers
	 * or quirks for specific PCI bridges to set NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	ret = arch_msi_check_device(dev, nvec, type);
	if (ret)
		return ret;

	if (!pci_find_capability(dev, type))
		return -EINVAL;

	return 0;
}

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a single
 * MSI irq when the driver requests MSI mode to be enabled on its hardware
 * device function. A return of zero indicates the successful setup of
 * entry zero with the new MSI irq; a non-zero return indicates failure.
 **/
int pci_enable_msi(struct pci_dev *dev)
{
	int status;

	status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI);
	if (status)
		return status;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether the driver already requested MSI-X irqs */
	if (dev->msix_enabled) {
		dev_info(&dev->dev, "can't enable MSI "
			 "(MSI-X already enabled)\n");
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	return status;
}
EXPORT_SYMBOL(pci_enable_msi);
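
/*
 * Illustrative usage sketch (the foo_* names are hypothetical and only
 * show the expected calling sequence):
 *
 *	if (!pci_enable_msi(pdev)) {
 *		err = request_irq(pdev->irq, foo_handler, 0, "foo", foo);
 *		if (err) {
 *			pci_disable_msi(pdev);
 *			... fall back to legacy INTx ...
 *		}
 *	}
 *
 * On teardown, call free_irq(pdev->irq, foo) before pci_disable_msi(),
 * which returns dev->irq to its original pin-based value.
 */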

void pci_msi_shutdown(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	/* Return the device to its reset state with MSI irqs unmasked */
	if (entry->msi_attrib.maskbit) {
		u32 mask = entry->msi_attrib.maskbits_mask;
		struct irq_desc *desc = irq_to_desc(dev->irq);
		msi_set_mask_bits(desc, mask, ~mask);
	}
	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
		return;

	/* Restore dev->irq to its default pin-assertion irq */
	dev->irq = entry->msi_attrib.default_irq;
}

void pci_disable_msi(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	pci_msi_shutdown(dev);

	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
		return;

	msi_free_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msi);

static int msi_free_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry, *tmp;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq)
			BUG_ON(irq_has_action(entry->irq));
	}

	arch_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
		if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) {
			writel(1, entry->mask_base + entry->msi_attrib.entry_nr
				  * PCI_MSIX_ENTRY_SIZE
				  + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

			if (list_is_last(&entry->list, &dev->msi_list))
				iounmap(entry->mask_base);
		}
		list_del(&entry->list);
		kfree(entry);
	}

	return 0;
}

/**
 * pci_msix_table_size - return the number of the device's MSI-X table entries
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 */
int pci_msix_table_size(struct pci_dev *dev)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	return multi_msix_capable(control);
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of the device function with the
 * number of requested irqs when the driver asks for MSI-X mode to be
 * enabled on its hardware device function. A return of zero indicates
 * the successful configuration of the MSI-X capability structure with the
 * newly allocated MSI-X irqs. A return of < 0 indicates a failure.
 * A return of > 0 indicates that the driver requested more irqs than are
 * available; the driver should then retry the request with at most the
 * returned number of irqs.
 **/
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
	int status, nr_entries;
	int i, j;

	if (!entries)
		return -EINVAL;

	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
	if (status)
		return status;

	nr_entries = pci_msix_table_size(dev);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether the driver already requested an MSI irq */
	if (dev->msi_enabled) {
		dev_info(&dev->dev, "can't enable MSI-X "
		       "(MSI IRQ already assigned)\n");
		return -EINVAL;
	}
	status = msix_capability_init(dev, entries, nvec);
	return status;
}
EXPORT_SYMBOL(pci_enable_msix);
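
/*
 * Illustrative retry loop for the "> 0" return described above (the foo_*
 * names and FOO_NVEC are hypothetical):
 *
 *	int rc, i, nvec = FOO_NVEC;
 *
 *	for (i = 0; i < nvec; i++)
 *		foo->msix[i].entry = i;
 *
 *	while ((rc = pci_enable_msix(pdev, foo->msix, nvec)) > 0)
 *		nvec = rc;	(fewer irqs available; shrink the request)
 *	if (rc < 0)
 *		... fall back to pci_enable_msi() or legacy INTx ...
 *	else
 *		... request_irq() each foo->msix[i].vector for i < nvec ...
 */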

static void msix_free_all_irqs(struct pci_dev *dev)
{
	msi_free_irqs(dev);
}

void pci_msix_shutdown(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	msix_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;
}

void pci_disable_msix(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	pci_msix_shutdown(dev);

	msix_free_all_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug remove, when the device function is hot-removed.
 * All MSI/MSI-X irqs previously allocated for this device function are
 * reclaimed to the unused state, so they may be used again later.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev)
		return;

	if (dev->msi_enabled)
		msi_free_irqs(dev);

	if (dev->msix_enabled)
		msix_free_all_irqs(dev);
}

void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

/**
 * pci_msi_enabled - is MSI enabled?
 *
 * Returns true if MSI has not been disabled by the command-line option
 * pci=nomsi.
 **/
int pci_msi_enabled(void)
{
	return pci_msi_enable;
}
EXPORT_SYMBOL(pci_msi_enabled);

void pci_msi_init_pci_dev(struct pci_dev *dev)
{
	INIT_LIST_HEAD(&dev->msi_list);
}