msi.c revision bffac3c593eba1f9da3efd0199e49ea6558a40ce
/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>

#include <asm/errno.h>
#include <asm/io.h>

#include "pci.h"
#include "msi.h"

static int pci_msi_enable = 1;

/* Arch hooks */

int __attribute__ ((weak))
arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	return 0;
}

int __attribute__ ((weak))
arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry)
{
	return 0;
}

int __attribute__ ((weak))
arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;

	list_for_each_entry(entry, &dev->msi_list, list) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret)
			return ret;
	}

	return 0;
}

void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq)
{
	return;
}

void __attribute__ ((weak))
arch_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq != 0)
			arch_teardown_msi_irq(entry->irq);
	}
}

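/*
 * Example (a hedged sketch, not taken from any particular architecture):
 * an arch overrides the weak hooks above to bind each MSI descriptor to a
 * real interrupt.  arch_alloc_irq() and arch_compose_msi_msg() below are
 * hypothetical placeholders for the arch-specific pieces; only
 * set_irq_msi() and write_msi_msg() are the generic calls used in this file.
 *
 *	int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 *	{
 *		struct msi_msg msg;
 *		int irq = arch_alloc_irq();
 *
 *		if (irq < 0)
 *			return irq;
 *		arch_compose_msi_msg(irq, &msg);
 *		set_irq_msi(irq, desc);
 *		write_msi_msg(irq, &msg);
 *		return 0;
 *	}
 */
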
static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
	u16 control;

	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
}

static void msi_set_enable(struct pci_dev *dev, int enable)
{
	__msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
}

static void msix_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

/*
 * Essentially, this is ((1 << (1 << x)) - 1), but without the
 * undefinedness of a << 32.
 */
static inline __attribute_const__ u32 msi_mask(unsigned x)
{
	/* Don't shift by >= the width of the type */
	if (x >= 5)
		return 0xffffffff;
	return (1 << (1 << x)) - 1;
}

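/*
 * For example, a capability advertising 2^3 = 8 vectors (x == 3) gives
 * msi_mask(3) == 0xff, i.e. one mask bit per potentially supported vector.
 */
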
static void msix_flush_writes(struct irq_desc *desc)
{
	struct msi_desc *entry;

	entry = get_irq_desc_msi(desc);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		/* nothing to do */
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
}

/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 *
 * Returns 1 if it succeeded in masking the interrupt and 0 if the device
 * doesn't support MSI masking.
 */
static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag)
{
	struct msi_desc *entry;

	entry = get_irq_desc_msi(desc);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			mask_bits &= ~(mask);
			mask_bits |= flag & mask;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		} else {
			return 0;
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
	entry->msi_attrib.masked = !!flag;
	return 1;
}

void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}

void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct irq_desc *desc = irq_to_desc(irq);

	read_msi_msg_desc(desc, msg);
}

void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
	entry->msg = *msg;
}

void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct irq_desc *desc = irq_to_desc(irq);

	write_msi_msg_desc(desc, msg);
}

void mask_msi_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	msi_set_mask_bits(desc, 1, 1);
	msix_flush_writes(desc);
}

void unmask_msi_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	msi_set_mask_bits(desc, 1, 0);
	msix_flush_writes(desc);
}

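/*
 * mask_msi_irq()/unmask_msi_irq() are intended to be used as the ->mask and
 * ->unmask callbacks of an architecture's MSI irq_chip.  A hedged sketch of
 * such a chip (the names are illustrative, not taken from any arch):
 *
 *	static struct irq_chip example_msi_chip = {
 *		.name		= "PCI-MSI",
 *		.mask		= mask_msi_irq,
 *		.unmask		= unmask_msi_irq,
 *		.ack		= example_ack_edge,
 *		.set_affinity	= example_set_msi_affinity,
 *	};
 */
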
static int msi_free_irqs(struct pci_dev *dev);

static struct msi_desc *alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
	if (!entry)
		return NULL;

	INIT_LIST_HEAD(&entry->list);
	entry->irq = 0;
	entry->dev = NULL;

	return entry;
}

static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
		pci_intx(dev, enable);
}

static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = get_irq_msi(dev->irq);
	pos = entry->msi_attrib.pos;

	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 0);
	write_msi_msg(dev->irq, &entry->msg);
	if (entry->msi_attrib.maskbit) {
		struct irq_desc *desc = irq_to_desc(dev->irq);
		msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask,
				  entry->msi_attrib.masked);
	}

	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}

static void __pci_restore_msix_state(struct pci_dev *dev)
{
	int pos;
	struct msi_desc *entry;
	u16 control;

	if (!dev->msix_enabled)
		return;

	/* route the table */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 0);

	list_for_each_entry(entry, &dev->msi_list, list) {
		struct irq_desc *desc = irq_to_desc(entry->irq);
		write_msi_msg(entry->irq, &entry->msg);
		msi_set_mask_bits(desc, 1, entry->msi_attrib.masked);
	}

	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
	control &= ~PCI_MSIX_FLAGS_MASKALL;
	control |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a single
 * MSI irq, regardless of whether the device function is capable of handling
 * multiple messages.  A return of zero indicates the successful setup of
 * entry zero with the new MSI irq; a non-zero return indicates failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, ret;
	u16 control;

	msi_set_enable(dev, 0);	/* Ensure msi is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.masked = 1;
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	entry->dev = dev;
	if (entry->msi_attrib.maskbit) {
		unsigned int base, maskbits, temp;

		base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
		entry->mask_base = (void __iomem *)(long)base;

		/* All MSIs are unmasked by default, Mask them all */
		pci_read_config_dword(dev, base, &maskbits);
		temp = msi_mask((control & PCI_MSI_FLAGS_QMASK) >> 1);
		maskbits |= temp;
		pci_write_config_dword(dev, base, maskbits);
		entry->msi_attrib.maskbits_mask = temp;
	}
	list_add_tail(&entry->list, &dev->msi_list);

	/* Configure MSI capability structure */
	ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI);
	if (ret) {
		msi_free_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits */
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	dev->irq = entry->irq;
	return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of the device function with the
 * requested number of MSI-X irqs.  A return of zero indicates the
 * successful setup of the requested MSI-X entries with allocated irqs;
 * a non-zero return indicates failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *entry;
	int pos, i, j, nr_entries, ret;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	msix_set_enable(dev, 0);	/* Ensure msix is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start(dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;

		j = entries[i].entry;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.masked = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->dev = dev;
		entry->mask_base = base;

		list_add_tail(&entry->list, &dev->msi_list);
	}

	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret) {
		int avail = 0;
		list_for_each_entry(entry, &dev->msi_list, list) {
			if (entry->irq != 0)
				avail++;
		}

		msi_free_irqs(dev);

		/* If we had some success, report the number of irqs
		 * we succeeded in setting up.
		 */
		if (avail == 0)
			avail = ret;
		return avail;
	}

	i = 0;
	list_for_each_entry(entry, &dev->msi_list, list) {
		entries[i].vector = entry->irq;
		set_irq_msi(entry->irq, entry);
		i++;
	}
	/* Set MSI-X enabled bits */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 1);
	dev->msix_enabled = 1;

	return 0;
}

/**
 * pci_msi_check_device - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested?
 * @type: are we checking for MSI or MSI-X?
 *
 * Look at global flags, the device itself, and its parent buses
 * to determine whether MSI/MSI-X is supported for the device.  If MSI/MSI-X
 * is supported, return 0, else return an error code.
 **/
static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	struct pci_bus *bus;
	int ret;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/*
	 * You can't ask to have 0 or fewer MSIs configured.
	 *  a) it's stupid ..
	 *  b) the list manipulation code assumes nvec >= 1.
	 */
	if (nvec < 1)
		return -ERANGE;

	/* Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set the NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller drivers
	 * or quirks for specific PCI bridges to be setting NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	ret = arch_msi_check_device(dev, nvec, type);
	if (ret)
		return ret;

	if (!pci_find_capability(dev, type))
		return -EINVAL;

	return 0;
}

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a single
 * MSI irq when the device's driver requests MSI mode to be enabled on its
 * hardware device function.  A return of zero indicates the successful
 * setup of entry zero with the new MSI irq; a non-zero return indicates
 * failure.
 **/
int pci_enable_msi(struct pci_dev *dev)
{
	int status;

	status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI);
	if (status)
		return status;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether the driver already requested MSI-X irqs */
	if (dev->msix_enabled) {
		dev_info(&dev->dev, "can't enable MSI "
			 "(MSI-X already enabled)\n");
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	return status;
}
EXPORT_SYMBOL(pci_enable_msi);

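/*
 * Typical (hedged) driver usage, falling back to a shared INTx line when MSI
 * cannot be enabled; "mydev" and my_irq_handler() are hypothetical:
 *
 *	if (pci_enable_msi(pdev))
 *		dev_warn(&pdev->dev, "MSI unavailable, using INTx\n");
 *	rc = request_irq(pdev->irq, my_irq_handler,
 *			 pdev->msi_enabled ? 0 : IRQF_SHARED, "mydev", mydev);
 *
 * On teardown the driver frees the irq first and then calls
 * pci_disable_msi(pdev), which restores pdev->irq to the pin-based value.
 */
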
void pci_msi_shutdown(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	/* Return the device to reset state with msi irqs unmasked */
	if (entry->msi_attrib.maskbit) {
		u32 mask = entry->msi_attrib.maskbits_mask;
		struct irq_desc *desc = irq_to_desc(dev->irq);
		msi_set_mask_bits(desc, mask, ~mask);
	}
	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
		return;

	/* Restore dev->irq to its default pin-assertion irq */
	dev->irq = entry->msi_attrib.default_irq;
}

void pci_disable_msi(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	pci_msi_shutdown(dev);

	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
		return;

	msi_free_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msi);

static int msi_free_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry, *tmp;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq)
			BUG_ON(irq_has_action(entry->irq));
	}

	arch_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
		if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) {
			writel(1, entry->mask_base + entry->msi_attrib.entry_nr
				  * PCI_MSIX_ENTRY_SIZE
				  + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

			if (list_is_last(&entry->list, &dev->msi_list))
				iounmap(entry->mask_base);
		}
		list_del(&entry->list);
		kfree(entry);
	}

	return 0;
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of the device function with the
 * number of requested irqs when the device's driver requests MSI-X mode
 * to be enabled on its hardware device function.  A return of zero
 * indicates the successful configuration of the MSI-X capability structure
 * with the newly allocated MSI-X irqs.  A return of < 0 indicates a failure.
 * A return of > 0 indicates that the driver requested more irqs than are
 * available; the driver should use the returned value to re-send its
 * request.
 **/
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries;
	int i, j;
	u16 control;

	if (!entries)
		return -EINVAL;

	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
	if (status)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether the driver already requested an MSI irq */
	if (dev->msi_enabled) {
		dev_info(&dev->dev, "can't enable MSI-X "
		       "(MSI IRQ already assigned)\n");
		return -EINVAL;
	}
	status = msix_capability_init(dev, entries, nvec);
	return status;
}
EXPORT_SYMBOL(pci_enable_msix);

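/*
 * Hedged usage sketch of the "retry with fewer vectors" convention described
 * above; pdev and the vector count of 4 are hypothetical:
 *
 *	struct msix_entry entries[4];
 *	int i, rc, nvec = 4;
 *
 *	for (i = 0; i < nvec; i++)
 *		entries[i].entry = i;
 *
 *	for (;;) {
 *		rc = pci_enable_msix(pdev, entries, nvec);
 *		if (rc == 0)
 *			break;          (entries[i].vector now holds the irqs)
 *		if (rc < 0)
 *			return rc;      (MSI-X cannot be used at all)
 *		nvec = rc;              (retry with the reported maximum)
 *	}
 */
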
static void msix_free_all_irqs(struct pci_dev *dev)
{
	msi_free_irqs(dev);
}

void pci_msix_shutdown(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	msix_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;
}

void pci_disable_msix(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	pci_msix_shutdown(dev);

	msix_free_all_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug removal of the device function.  All MSI/MSI-X
 * irqs previously assigned to this device function, if any, are reclaimed
 * to the unused state so that they may be reused later.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev)
		return;

	if (dev->msi_enabled)
		msi_free_irqs(dev);

	if (dev->msix_enabled)
		msix_free_all_irqs(dev);
}

void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

/**
 * pci_msi_enabled - is MSI enabled?
 *
 * Returns true if MSI has not been disabled by the command-line option
 * pci=nomsi.
 **/
int pci_msi_enabled(void)
{
	return pci_msi_enable;
}
EXPORT_SYMBOL(pci_msi_enabled);

void pci_msi_init_pci_dev(struct pci_dev *dev)
{
	INIT_LIST_HEAD(&dev->msi_list);
}