msi.c revision b1cbf4e4dddd708ba268c3a2bf38383a269d490a
1/*
2 * File:	msi.c
3 * Purpose:	PCI Message Signaled Interrupt (MSI)
4 *
5 * Copyright (C) 2003-2004 Intel
6 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
7 */
8
9#include <linux/err.h>
10#include <linux/mm.h>
11#include <linux/irq.h>
12#include <linux/interrupt.h>
13#include <linux/init.h>
14#include <linux/ioport.h>
15#include <linux/smp_lock.h>
16#include <linux/pci.h>
17#include <linux/proc_fs.h>
18#include <linux/msi.h>
19
20#include <asm/errno.h>
21#include <asm/io.h>
22#include <asm/smp.h>
23
24#include "pci.h"
25#include "msi.h"
26
27static struct kmem_cache* msi_cachep;
28
29static int pci_msi_enable = 1;
30
31static int msi_cache_init(void)
32{
33	msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc),
34					0, SLAB_HWCACHE_ALIGN, NULL, NULL);
35	if (!msi_cachep)
36		return -ENOMEM;
37
38	return 0;
39}
40
41static void msi_set_enable(struct pci_dev *dev, int enable)
42{
43	int pos;
44	u16 control;
45
46	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
47	if (pos) {
48		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
49		control &= ~PCI_MSI_FLAGS_ENABLE;
50		if (enable)
51			control |= PCI_MSI_FLAGS_ENABLE;
52		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
53	}
54}
55
56static void msix_set_enable(struct pci_dev *dev, int enable)
57{
58	int pos;
59	u16 control;
60
61	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
62	if (pos) {
63		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
64		control &= ~PCI_MSIX_FLAGS_ENABLE;
65		if (enable)
66			control |= PCI_MSIX_FLAGS_ENABLE;
67		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
68	}
69}
70
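/*
 * Per-vector masking differs between the two capabilities: plain MSI keeps
 * an optional Mask Bits register in config space (entry->mask_base then
 * holds that config-space offset cast to a pointer), while MSI-X always has
 * a Vector Control word per entry in the memory-mapped table (mask_base is
 * the ioremapped table base).  msi_set_mask_bit() below hides that split.
 */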
71static void msi_set_mask_bit(unsigned int irq, int flag)
72{
73	struct msi_desc *entry;
74
75	entry = get_irq_msi(irq);
76	BUG_ON(!entry || !entry->dev);
77	switch (entry->msi_attrib.type) {
78	case PCI_CAP_ID_MSI:
79		if (entry->msi_attrib.maskbit) {
80			int pos;
81			u32 mask_bits;
82
83			pos = (long)entry->mask_base;
84			pci_read_config_dword(entry->dev, pos, &mask_bits);
85			mask_bits &= ~(1);
86			mask_bits |= flag;
87			pci_write_config_dword(entry->dev, pos, mask_bits);
88		}
89		break;
90	case PCI_CAP_ID_MSIX:
91	{
92		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
93			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
94		writel(flag, entry->mask_base + offset);
95		break;
96	}
97	default:
98		BUG();
99		break;
100	}
101}
102
103void read_msi_msg(unsigned int irq, struct msi_msg *msg)
104{
105	struct msi_desc *entry = get_irq_msi(irq);
106	switch(entry->msi_attrib.type) {
107	case PCI_CAP_ID_MSI:
108	{
109		struct pci_dev *dev = entry->dev;
110		int pos = entry->msi_attrib.pos;
111		u16 data;
112
113		pci_read_config_dword(dev, msi_lower_address_reg(pos),
114					&msg->address_lo);
115		if (entry->msi_attrib.is_64) {
116			pci_read_config_dword(dev, msi_upper_address_reg(pos),
117						&msg->address_hi);
118			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
119		} else {
120			msg->address_hi = 0;
121			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
122		}
123		msg->data = data;
124		break;
125	}
126	case PCI_CAP_ID_MSIX:
127	{
128		void __iomem *base;
129		base = entry->mask_base +
130			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
131
132		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
133		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
134		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
135 		break;
136 	}
137 	default:
138		BUG();
139	}
140}
141
142void write_msi_msg(unsigned int irq, struct msi_msg *msg)
143{
144	struct msi_desc *entry = get_irq_msi(irq);
145	switch (entry->msi_attrib.type) {
146	case PCI_CAP_ID_MSI:
147	{
148		struct pci_dev *dev = entry->dev;
149		int pos = entry->msi_attrib.pos;
150
151		pci_write_config_dword(dev, msi_lower_address_reg(pos),
152					msg->address_lo);
153		if (entry->msi_attrib.is_64) {
154			pci_write_config_dword(dev, msi_upper_address_reg(pos),
155						msg->address_hi);
156			pci_write_config_word(dev, msi_data_reg(pos, 1),
157						msg->data);
158		} else {
159			pci_write_config_word(dev, msi_data_reg(pos, 0),
160						msg->data);
161		}
162		break;
163	}
164	case PCI_CAP_ID_MSIX:
165	{
166		void __iomem *base;
167		base = entry->mask_base +
168			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
169
170		writel(msg->address_lo,
171			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
172		writel(msg->address_hi,
173			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
174		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
175		break;
176	}
177	default:
178		BUG();
179	}
180}
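/*
 * The struct msi_msg moved around here is composed by architecture code (the
 * arch_setup_msi_irq() path ends up calling write_msi_msg()).  On x86, for
 * example, address_lo selects the target local APIC in the 0xFEExxxxx range
 * and data carries the interrupt vector and delivery mode; this file only
 * shuttles the message to and from the device's registers.
 */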
181
182void mask_msi_irq(unsigned int irq)
183{
184	msi_set_mask_bit(irq, 1);
185}
186
187void unmask_msi_irq(unsigned int irq)
188{
189	msi_set_mask_bit(irq, 0);
190}
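/*
 * mask_msi_irq()/unmask_msi_irq() are not called by drivers; they are meant
 * to be wired into the per-irq irq_chip by architecture code.  A minimal
 * sketch of such a chip (the ack/set_affinity handlers are arch-specific
 * placeholders, not defined in this file):
 *
 *	static struct irq_chip msi_chip = {
 *		.name		= "PCI-MSI",
 *		.unmask		= unmask_msi_irq,
 *		.mask		= mask_msi_irq,
 *		.ack		= arch_ack_msi_irq,		// placeholder
 *		.set_affinity	= arch_set_msi_affinity,	// placeholder
 *	};
 *
 * arch_setup_msi_irq() would then attach it with something like
 * set_irq_chip_and_handler(irq, &msi_chip, handle_edge_irq).
 */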
191
192static int msi_free_irq(struct pci_dev* dev, int irq);
193
194static int msi_init(void)
195{
196	static int status = -ENOMEM;
197
198	if (!status)
199		return status;
200
201	status = msi_cache_init();
202	if (status < 0) {
203		pci_msi_enable = 0;
204		printk(KERN_WARNING "PCI: MSI cache init failed\n");
205		return status;
206	}
207
208	return status;
209}
210
211static struct msi_desc* alloc_msi_entry(void)
212{
213	struct msi_desc *entry;
214
215	entry = kmem_cache_zalloc(msi_cachep, GFP_KERNEL);
216	if (!entry)
217		return NULL;
218
219	entry->link.tail = entry->link.head = 0;	/* single message */
220	entry->dev = NULL;
221
222	return entry;
223}
224
225#ifdef CONFIG_PM
226static int __pci_save_msi_state(struct pci_dev *dev)
227{
228	int pos, i = 0;
229	u16 control;
230	struct pci_cap_saved_state *save_state;
231	u32 *cap;
232
233	if (!dev->msi_enabled)
234		return 0;
235
236	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
237	if (pos <= 0)
238		return 0;
239
240	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
241		GFP_KERNEL);
242	if (!save_state) {
243		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
244		return -ENOMEM;
245	}
246	cap = &save_state->data[0];
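	/*
	 * cap[] mirrors the MSI capability dwords in order: dword 0 holds the
	 * capability header plus Message Control, then Message Address (and
	 * the upper address dword for 64-bit devices), then Message Data,
	 * then the optional Mask Bits register.
	 */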
247
248	pci_read_config_dword(dev, pos, &cap[i++]);
249	control = cap[0] >> 16;
250	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
251	if (control & PCI_MSI_FLAGS_64BIT) {
252		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
253		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
254	} else
255		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
256	if (control & PCI_MSI_FLAGS_MASKBIT)
257		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
258	save_state->cap_nr = PCI_CAP_ID_MSI;
259	pci_add_saved_cap(dev, save_state);
260	return 0;
261}
262
263static void __pci_restore_msi_state(struct pci_dev *dev)
264{
265	int i = 0, pos;
266	u16 control;
267	struct pci_cap_saved_state *save_state;
268	u32 *cap;
269
270	if (!dev->msi_enabled)
271		return;
272
273	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
274	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
275	if (!save_state || pos <= 0)
276		return;
277	cap = &save_state->data[0];
278
279	pci_intx(dev, 0);		/* disable intx */
280	control = cap[i++] >> 16;
281	msi_set_enable(dev, 0);
282	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
283	if (control & PCI_MSI_FLAGS_64BIT) {
284		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
285		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
286	} else
287		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
288	if (control & PCI_MSI_FLAGS_MASKBIT)
289		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
290	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
291	pci_remove_saved_cap(save_state);
292	kfree(save_state);
293}
294
295static int __pci_save_msix_state(struct pci_dev *dev)
296{
297	int pos;
298	int irq, head, tail = 0;
299	u16 control;
300	struct pci_cap_saved_state *save_state;
301
302	if (!dev->msix_enabled)
303		return 0;
304
305	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
306	if (pos <= 0)
307		return 0;
308
309	/* save the capability */
310	pci_read_config_word(dev, msi_control_reg(pos), &control);
311	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
312		GFP_KERNEL);
313	if (!save_state) {
314		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
315		return -ENOMEM;
316	}
317	*((u16 *)&save_state->data[0]) = control;
318
319	/* save the table */
320	irq = head = dev->first_msi_irq;
321	while (head != tail) {
322		struct msi_desc *entry;
323
324		entry = get_irq_msi(irq);
325		read_msi_msg(irq, &entry->msg_save);
326
327		tail = entry->link.tail;
328		irq = tail;
329	}
330
331	save_state->cap_nr = PCI_CAP_ID_MSIX;
332	pci_add_saved_cap(dev, save_state);
333	return 0;
334}
335
336int pci_save_msi_state(struct pci_dev *dev)
337{
338	int rc;
339
340	rc = __pci_save_msi_state(dev);
341	if (rc)
342		return rc;
343
344	rc = __pci_save_msix_state(dev);
345
346	return rc;
347}
348
349static void __pci_restore_msix_state(struct pci_dev *dev)
350{
351	u16 save;
352	int pos;
353	int irq, head, tail = 0;
354	struct msi_desc *entry;
355	struct pci_cap_saved_state *save_state;
356
357	if (!dev->msix_enabled)
358		return;
359
360	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
361	if (!save_state)
362		return;
363	save = *((u16 *)&save_state->data[0]);
364	pci_remove_saved_cap(save_state);
365	kfree(save_state);
366
367	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
368	if (pos <= 0)
369		return;
370
371	/* restore the MSI-X table entries */
372	pci_intx(dev, 0);		/* disable intx */
373	msix_set_enable(dev, 0);
374	irq = head = dev->first_msi_irq;
375	while (head != tail) {
376		entry = get_irq_msi(irq);
377		write_msi_msg(irq, &entry->msg_save);
378
379		tail = entry->link.tail;
380		irq = tail;
381	}
382
383	pci_write_config_word(dev, msi_control_reg(pos), save);
384}
385
386void pci_restore_msi_state(struct pci_dev *dev)
387{
388	__pci_restore_msi_state(dev);
389	__pci_restore_msix_state(dev);
390}
391#endif	/* CONFIG_PM */
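/*
 * The save/restore helpers above are reached through the generic PCI PM
 * paths rather than called by drivers directly.  A minimal sketch of a
 * driver suspend/resume pair that relies on them (foo_* names are
 * illustrative only):
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);		// includes pci_save_msi_state()
 *		pci_disable_device(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);	// includes pci_restore_msi_state()
 *		return pci_enable_device(pdev);
 *	}
 */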
392
393/**
394 * msi_capability_init - configure device's MSI capability structure
395 * @dev: pointer to the pci_dev data structure of MSI device function
396 *
397 * Set up the MSI capability structure of the device function with a single
398 * MSI irq, regardless of whether the device function is capable of handling
399 * multiple messages. A return of zero indicates successful setup of entry
400 * zero with the new MSI irq; a non-zero return indicates failure.
401 **/
402static int msi_capability_init(struct pci_dev *dev)
403{
404	struct msi_desc *entry;
405	int pos, irq;
406	u16 control;
407
408	msi_set_enable(dev, 0);	/* Ensure MSI is disabled while we set it up */
409
410   	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
411	pci_read_config_word(dev, msi_control_reg(pos), &control);
412	/* MSI Entry Initialization */
413	entry = alloc_msi_entry();
414	if (!entry)
415		return -ENOMEM;
416
417	entry->msi_attrib.type = PCI_CAP_ID_MSI;
418	entry->msi_attrib.is_64 = is_64bit_address(control);
419	entry->msi_attrib.entry_nr = 0;
420	entry->msi_attrib.maskbit = is_mask_bit_support(control);
421	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
422	entry->msi_attrib.pos = pos;
423	if (is_mask_bit_support(control)) {
424		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
425				is_64bit_address(control));
426	}
427	entry->dev = dev;
428	if (entry->msi_attrib.maskbit) {
429		unsigned int maskbits, temp;
430		/* All MSIs are unmasked by default; mask them all */
431		pci_read_config_dword(dev,
432			msi_mask_bits_reg(pos, is_64bit_address(control)),
433			&maskbits);
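		/*
		 * multi_msi_capable() is the number of messages the function
		 * can generate; the two lines below turn that into a mask
		 * with exactly that many low-order bits set, one per vector.
		 */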
434		temp = (1 << multi_msi_capable(control));
435		temp = ((temp - 1) & ~temp);
436		maskbits |= temp;
437		pci_write_config_dword(dev,
438			msi_mask_bits_reg(pos, is_64bit_address(control)),
439			maskbits);
440	}
441	/* Configure MSI capability structure */
442	irq = arch_setup_msi_irq(dev, entry);
443	if (irq < 0) {
444		kmem_cache_free(msi_cachep, entry);
445		return irq;
446	}
447	entry->link.head = irq;
448	entry->link.tail = irq;
449	dev->first_msi_irq = irq;
450	set_irq_msi(irq, entry);
451
452	/* Set MSI enabled bits	 */
453	pci_intx(dev, 0);		/* disable intx */
454	msi_set_enable(dev, 1);
455	dev->msi_enabled = 1;
456
457	dev->irq = irq;
458	return 0;
459}
460
461/**
462 * msix_capability_init - configure device's MSI-X capability
463 * @dev: pointer to the pci_dev data structure of MSI-X device function
464 * @entries: pointer to an array of struct msix_entry entries
465 * @nvec: number of @entries
466 *
467 * Set up the MSI-X capability structure of the device function with the
468 * requested number of MSI-X irqs. A return of zero indicates that all
469 * requested entries were set up with allocated irqs; non-zero indicates failure.
470 **/
471static int msix_capability_init(struct pci_dev *dev,
472				struct msix_entry *entries, int nvec)
473{
474	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
475	int irq, pos, i, j, nr_entries, temp = 0;
476	unsigned long phys_addr;
477	u32 table_offset;
478 	u16 control;
479	u8 bir;
480	void __iomem *base;
481
482	msix_set_enable(dev, 0);/* Ensure MSI-X is disabled while we set it up */
483
484   	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
485	/* Request & Map MSI-X table region */
486 	pci_read_config_word(dev, msi_control_reg(pos), &control);
487	nr_entries = multi_msix_capable(control);
488
489 	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
490	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
491	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
492	phys_addr = pci_resource_start (dev, bir) + table_offset;
493	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
494	if (base == NULL)
495		return -ENOMEM;
496
497	/* MSI-X Table Initialization */
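	/*
	 * Each msi_desc allocated below is also chained into a circular list
	 * keyed by irq number (link.tail walks forward, link.head backward),
	 * so this device's MSI-X irqs can be walked later by
	 * __pci_save_msix_state() and pci_disable_msix().
	 */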
498	for (i = 0; i < nvec; i++) {
499		entry = alloc_msi_entry();
500		if (!entry)
501			break;
502
503 		j = entries[i].entry;
504		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
505		entry->msi_attrib.is_64 = 1;
506		entry->msi_attrib.entry_nr = j;
507		entry->msi_attrib.maskbit = 1;
508		entry->msi_attrib.default_irq = dev->irq;
509		entry->msi_attrib.pos = pos;
510		entry->dev = dev;
511		entry->mask_base = base;
512
513		/* Configure MSI-X capability structure */
514		irq = arch_setup_msi_irq(dev, entry);
515		if (irq < 0) {
516			kmem_cache_free(msi_cachep, entry);
517			break;
518		}
519 		entries[i].vector = irq;
520		if (!head) {
521			entry->link.head = irq;
522			entry->link.tail = irq;
523			head = entry;
524		} else {
525			entry->link.head = temp;
526			entry->link.tail = tail->link.tail;
527			tail->link.tail = irq;
528			head->link.head = irq;
529		}
530		temp = irq;
531		tail = entry;
532
533		set_irq_msi(irq, entry);
534	}
535	if (i != nvec) {
536		int avail = i - 1;
537		i--;
538		for (; i >= 0; i--) {
539			irq = (entries + i)->vector;
540			msi_free_irq(dev, irq);
541			(entries + i)->vector = 0;
542		}
543		/* If we had some success, report the number of irqs
544		 * we succeeded in setting up.
545		 */
546		if (avail <= 0)
547			avail = -EBUSY;
548		return avail;
549	}
550	dev->first_msi_irq = entries[0].vector;
551	/* Set MSI-X enabled bits */
552	pci_intx(dev, 0);		/* disable intx */
553	msix_set_enable(dev, 1);
554	dev->msix_enabled = 1;
555
556	return 0;
557}
558
559/**
560 * pci_msi_supported - check whether MSI may be enabled on device
561 * @dev: pointer to the pci_dev data structure of MSI device function
562 *
563 * Look at global flags, the device itself, and its parent busses
564 * to return 0 if MSI is supported for the device.
565 **/
566static
567int pci_msi_supported(struct pci_dev * dev)
568{
569	struct pci_bus *bus;
570
571	/* MSI must be globally enabled and supported by the device */
572	if (!pci_msi_enable || !dev || dev->no_msi)
573		return -EINVAL;
574
575	/* Any bridge which does NOT route MSI transactions from its
576	 * secondary bus to its primary bus must set the NO_MSI flag on
577	 * the secondary pci_bus.
578	 * We expect only arch-specific PCI host bus controller driver
579	 * or quirks for specific PCI bridges to be setting NO_MSI.
580	 */
581	for (bus = dev->bus; bus; bus = bus->parent)
582		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
583			return -EINVAL;
584
585	return 0;
586}
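/*
 * The PCI_BUS_FLAGS_NO_MSI check above is normally satisfied by a fixup in
 * drivers/pci/quirks.c.  A sketch of such a quirk for a bridge known not to
 * forward MSI writes (the vendor/device IDs below are placeholders):
 *
 *	static void __devinit quirk_no_msi_bridge(struct pci_dev *dev)
 *	{
 *		if (dev->subordinate)
 *			dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_no_msi_bridge);
 */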
587
588/**
589 * pci_enable_msi - configure device's MSI capability structure
590 * @dev: pointer to the pci_dev data structure of MSI device function
591 *
592 * Set up the MSI capability structure of the device function with a
593 * single MSI irq when the device driver requests MSI mode for its
594 * hardware function. A return of zero indicates successful setup of
595 * entry zero with the new MSI irq (dev->irq is updated to it); a
596 * non-zero return indicates failure.
597 **/
598int pci_enable_msi(struct pci_dev* dev)
599{
600	int pos, status;
601
602	if (pci_msi_supported(dev) < 0)
603		return -EINVAL;
604
605	status = msi_init();
606	if (status < 0)
607		return status;
608
609	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
610	if (!pos)
611		return -EINVAL;
612
613	WARN_ON(!!dev->msi_enabled);
614
615	/* Check whether the driver already requested MSI-X irqs */
616	if (dev->msix_enabled) {
617		printk(KERN_INFO "PCI: %s: Can't enable MSI.  "
618			"Device already has MSI-X enabled\n",
619			pci_name(dev));
620		return -EINVAL;
621	}
622	status = msi_capability_init(dev);
623	return status;
624}
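/*
 * Typical driver usage, as a sketch loosely following
 * Documentation/MSI-HOWTO.txt (foo_* names are illustrative):
 *
 *	pci_enable_device(pdev);
 *	if (pci_enable_msi(pdev))
 *		dev_info(&pdev->dev, "MSI unavailable, using INTx\n");
 *	if (request_irq(pdev->irq, foo_interrupt, IRQF_SHARED,
 *			"foo", foo_dev))
 *		goto err_out;
 *
 * request_irq() must come after pci_enable_msi(), since success rewrites
 * pdev->irq to the newly allocated MSI irq.  Teardown runs in the opposite
 * order: free_irq(pdev->irq, foo_dev) and then pci_disable_msi(pdev), which
 * restores pdev->irq to the original pin-based value.
 */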
625
626void pci_disable_msi(struct pci_dev* dev)
627{
628	struct msi_desc *entry;
629	int default_irq;
630
631	if (!pci_msi_enable)
632		return;
633	if (!dev)
634		return;
635
636	if (!dev->msi_enabled)
637		return;
638
639	msi_set_enable(dev, 0);
640	pci_intx(dev, 1);		/* enable intx */
641	dev->msi_enabled = 0;
642
643	entry = get_irq_msi(dev->first_msi_irq);
644	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
645		return;
646	}
647	if (irq_has_action(dev->first_msi_irq)) {
648		printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
649		       "free_irq() on MSI irq %d\n",
650		       pci_name(dev), dev->first_msi_irq);
651		BUG_ON(irq_has_action(dev->first_msi_irq));
652	} else {
653		default_irq = entry->msi_attrib.default_irq;
654		msi_free_irq(dev, dev->first_msi_irq);
655
656		/* Restore dev->irq to its default pin-assertion irq */
657		dev->irq = default_irq;
658	}
659	dev->first_msi_irq = 0;
660}
661
662static int msi_free_irq(struct pci_dev* dev, int irq)
663{
664	struct msi_desc *entry;
665	int head, entry_nr, type;
666	void __iomem *base;
667
668	entry = get_irq_msi(irq);
669	if (!entry || entry->dev != dev) {
670		return -EINVAL;
671	}
672	type = entry->msi_attrib.type;
673	entry_nr = entry->msi_attrib.entry_nr;
674	head = entry->link.head;
675	base = entry->mask_base;
676	get_irq_msi(entry->link.head)->link.tail = entry->link.tail;
677	get_irq_msi(entry->link.tail)->link.head = entry->link.head;
678
679	arch_teardown_msi_irq(irq);
680	kmem_cache_free(msi_cachep, entry);
681
682	if (type == PCI_CAP_ID_MSIX) {
683		writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE +
684			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
685
686		if (head == irq)
687			iounmap(base);
688	}
689
690	return 0;
691}
692
693/**
694 * pci_enable_msix - configure device's MSI-X capability structure
695 * @dev: pointer to the pci_dev data structure of MSI-X device function
696 * @entries: pointer to an array of MSI-X entries
697 * @nvec: number of MSI-X irqs requested for allocation by device driver
698 *
699 * Set up the MSI-X capability structure of the device function with the
700 * number of requested irqs when the device driver requests MSI-X mode
701 * for its hardware function. A return of zero indicates successful
702 * configuration of the MSI-X capability structure with newly allocated
703 * MSI-X irqs. A return of < 0 indicates a failure.
704 * A return of > 0 indicates that the request exceeds the number of irqs
705 * available; the driver should retry with a request no larger than the
706 * returned value.
707 **/
708int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
709{
710	int status, pos, nr_entries;
711	int i, j;
712	u16 control;
713
714	if (!entries || pci_msi_supported(dev) < 0)
715 		return -EINVAL;
716
717	status = msi_init();
718	if (status < 0)
719		return status;
720
721	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
722	if (!pos)
723 		return -EINVAL;
724
725	pci_read_config_word(dev, msi_control_reg(pos), &control);
726	nr_entries = multi_msix_capable(control);
727	if (nvec > nr_entries)
728		return -EINVAL;
729
730	/* Check for any invalid entries */
731	for (i = 0; i < nvec; i++) {
732		if (entries[i].entry >= nr_entries)
733			return -EINVAL;		/* invalid entry */
734		for (j = i + 1; j < nvec; j++) {
735			if (entries[i].entry == entries[j].entry)
736				return -EINVAL;	/* duplicate entry */
737		}
738	}
739	WARN_ON(!!dev->msix_enabled);
740
741	/* Check whether the driver already requested an MSI irq */
742   	if (dev->msi_enabled) {
743		printk(KERN_INFO "PCI: %s: Can't enable MSI-X.  "
744		       "Device already has an MSI irq assigned\n",
745		       pci_name(dev));
746		return -EINVAL;
747	}
748	status = msix_capability_init(dev, entries, nvec);
749	return status;
750}
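/*
 * Illustrative MSI-X usage (names and the vector count are placeholders).
 * The driver fills in the .entry indices it wants and, on a positive
 * return, may retry with the smaller number of vectors reported available:
 *
 *	struct msix_entry vec[4];
 *	int i, err, nvec = 4;
 *
 *	for (i = 0; i < nvec; i++)
 *		vec[i].entry = i;
 *	err = pci_enable_msix(pdev, vec, nvec);
 *	if (err > 0) {
 *		nvec = err;
 *		err = pci_enable_msix(pdev, vec, nvec);
 *	}
 *	if (err)
 *		return err;		// fall back to pci_enable_msi() or INTx
 *	for (i = 0; i < nvec; i++)
 *		request_irq(vec[i].vector, foo_msix_handler, 0, "foo", foo_dev);
 *
 * Before pci_disable_msix() is called, free_irq() must have been called on
 * every allocated vector (see the warning path in pci_disable_msix()).
 */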
751
752void pci_disable_msix(struct pci_dev* dev)
753{
754	int irq, head, tail = 0, warning = 0;
755
756	if (!pci_msi_enable)
757		return;
758	if (!dev)
759		return;
760
761	if (!dev->msix_enabled)
762		return;
763
764	msix_set_enable(dev, 0);
765	pci_intx(dev, 1);		/* enable intx */
766	dev->msix_enabled = 0;
767
768	irq = head = dev->first_msi_irq;
769	while (head != tail) {
770		tail = get_irq_msi(irq)->link.tail;
771		if (irq_has_action(irq))
772			warning = 1;
773		else if (irq != head)	/* Release MSI-X irq */
774			msi_free_irq(dev, irq);
775		irq = tail;
776	}
777	msi_free_irq(dev, irq);
778	if (warning) {
779		printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
780			"free_irq() on all MSI-X irqs\n",
781			pci_name(dev));
782		BUG_ON(warning > 0);
783	}
784	dev->first_msi_irq = 0;
785}
786
787/**
788 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
789 * @dev: pointer to the pci_dev data structure of MSI(X) device function
790 *
791 * Called during hotplug remove, when the device function is being
792 * hot-removed. All previously assigned MSI/MSI-X irqs, if any were
793 * allocated for this device function, are reclaimed to the unused
794 * state, so they may be reused later.
795 **/
796void msi_remove_pci_irq_vectors(struct pci_dev* dev)
797{
798	if (!pci_msi_enable || !dev)
799 		return;
800
801	if (dev->msi_enabled) {
802		if (irq_has_action(dev->first_msi_irq)) {
803			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
804			       "called without free_irq() on MSI irq %d\n",
805			       pci_name(dev), dev->first_msi_irq);
806			BUG_ON(irq_has_action(dev->first_msi_irq));
807		} else /* Release MSI irq assigned to this device */
808			msi_free_irq(dev, dev->first_msi_irq);
809	}
810	if (dev->msix_enabled) {
811		int irq, head, tail = 0, warning = 0;
812		void __iomem *base = NULL;
813
814		irq = head = dev->first_msi_irq;
815		while (head != tail) {
816			tail = get_irq_msi(irq)->link.tail;
817			base = get_irq_msi(irq)->mask_base;
818			if (irq_has_action(irq))
819				warning = 1;
820			else if (irq != head) /* Release MSI-X irq */
821				msi_free_irq(dev, irq);
822			irq = tail;
823		}
824		msi_free_irq(dev, irq);
825		if (warning) {
826			iounmap(base);
827			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
828			       "called without free_irq() on all MSI-X irqs\n",
829			       pci_name(dev));
830			BUG_ON(warning > 0);
831		}
832	}
833}
834
835void pci_no_msi(void)
836{
837	pci_msi_enable = 0;
838}
839
840EXPORT_SYMBOL(pci_enable_msi);
841EXPORT_SYMBOL(pci_disable_msi);
842EXPORT_SYMBOL(pci_enable_msix);
843EXPORT_SYMBOL(pci_disable_msix);
844